├── README.md ├── elfk+xpack ├── namespace.yaml ├── img │ ├── elfk-1.png │ ├── elfk-2.png │ ├── elfk-3.png │ ├── elfk-4.png │ ├── elfk-5.png │ └── elfk-6.png ├── elasticsearch │ └── elastic-certificates.p12 ├── kibana │ ├── config.yaml │ └── kibana.yaml ├── elasticsearch-head │ └── head.yaml ├── filebeat │ ├── filebeat-config.yaml │ ├── filebeat-nginx.yaml │ └── filebeat-tomcat.yaml ├── logstash │ ├── logstash.yaml │ └── config.yaml └── alicloud-nas-elfk-pv.yaml ├── weave-scope ├── weave-ns.yaml ├── manifests │ ├── weave-scope-app-ing.yaml │ ├── weave-scope-app-svc.yaml │ ├── weave-scope-app-deploy.yaml │ ├── weave-scope-cluster-agent-deploy.yaml │ ├── weave-scope-rbac.yaml │ └── weave-scope-agent-ds.yaml └── README.md ├── zabbix ├── namespace.yaml ├── img │ ├── zabbix-1.png │ ├── zabbix-2.png │ ├── zabbix-3.png │ └── zabbix-4.png ├── nfs-mysql-pv.yaml ├── zabbix-web │ └── zabbix-web.yaml ├── mysql │ └── mysql.yaml └── zabbix-server │ └── zabbix-server.yaml ├── gitlab ├── img │ ├── gitlab-1.png │ └── gitlab-2.png ├── public-service-ns.yaml ├── upgrade-check │ ├── chart-info-cm.yaml │ └── upgrade-check-job.yaml ├── gitaly │ ├── gitaly-pdb.yaml │ ├── gitaly-svc.yaml │ └── gitaly-cm.yaml ├── minio │ ├── minio-pdb.yaml │ ├── minio-svc.yaml │ ├── minio-pvc.yaml │ ├── minio-ing.yaml │ ├── minio-create-buckets-job.yaml │ └── minio-deploy.yaml ├── shell │ ├── shell-pdb.yaml │ ├── shell-svc.yaml │ ├── shell-sshd-cm.yaml │ └── shell-cm.yaml ├── registry │ ├── registry-pdb.yaml │ ├── registry-svc.yaml │ ├── registry-ing.yaml │ └── registry-cm.yaml ├── webservice │ ├── webservice-pdb.yaml │ ├── webservice-svc.yaml │ ├── webservice-ing.yaml │ └── workhorse-cm.yaml ├── sidekiq │ ├── sidekiq-allin1-pdb.yaml │ └── sidekiq-allin1-cm.yaml ├── redis │ ├── redis-svc.yaml │ ├── redis-headless-svc.yaml │ ├── redis-cm.yaml │ └── redis-health-cm.yaml ├── postgresql │ ├── postgresql-svc.yaml │ ├── postgresql-headless-svc.yaml │ └── postgresql-init-db-cm.yaml ├── shared-secrets │ ├── 
shared-secrets-job.yaml │ └── shared-secrets-rbac.yaml ├── gitlab-pv.yaml └── migrations │ └── migrations-cm.yaml ├── harbor ├── img │ ├── harbor-1.png │ ├── harbor-2.png │ └── harbor-3.png ├── harbor │ ├── .helmignore │ ├── templates │ │ ├── NOTES.txt │ │ ├── database │ │ │ ├── database-secret.yaml │ │ │ └── database-svc.yaml │ │ ├── redis │ │ │ └── service.yaml │ │ ├── trivy │ │ │ ├── trivy-secret.yaml │ │ │ ├── trivy-svc.yaml │ │ │ └── trivy-tls.yaml │ │ ├── jobservice │ │ │ ├── jobservice-secrets.yaml │ │ │ ├── jobservice-svc.yaml │ │ │ ├── jobservice-tls.yaml │ │ │ ├── jobservice-cm-env.yaml │ │ │ ├── jobservice-pvc.yaml │ │ │ └── jobservice-cm.yaml │ │ ├── portal │ │ │ ├── service.yaml │ │ │ ├── tls.yaml │ │ │ ├── configmap.yaml │ │ │ └── deployment.yaml │ │ ├── clair │ │ │ ├── clair-svc.yaml │ │ │ ├── clair-secret.yaml │ │ │ └── clair-tls.yaml │ │ ├── registry │ │ │ ├── registry-svc.yaml │ │ │ ├── registry-tls.yaml │ │ │ ├── registry-pvc.yaml │ │ │ └── registry-secret.yaml │ │ ├── chartmuseum │ │ │ ├── chartmuseum-svc.yaml │ │ │ ├── chartmuseum-tls.yaml │ │ │ ├── chartmuseum-secret.yaml │ │ │ └── chartmuseum-pvc.yaml │ │ ├── core │ │ │ ├── core-svc.yaml │ │ │ ├── core-tls.yaml │ │ │ ├── core-secret.yaml │ │ │ └── core-cm.yaml │ │ ├── ingress │ │ │ └── secret.yaml │ │ ├── notary │ │ │ ├── notary-secret.yaml │ │ │ ├── notary-svc.yaml │ │ │ ├── notary-signer.yaml │ │ │ └── notary-server.yaml │ │ └── nginx │ │ │ ├── secret.yaml │ │ │ └── service.yaml │ ├── conf │ │ ├── notary-signer.json │ │ ├── clair.yaml │ │ └── notary-server.json │ ├── Chart.yaml │ ├── .github │ │ └── workflows │ │ │ ├── unittest.yaml │ │ │ ├── lint.yaml │ │ │ └── integration.yaml │ └── cert │ │ └── tls.crt ├── public-service-ns.yaml └── harbor-pv.yaml ├── elfk ├── public-service-ns.yaml ├── filebeat │ ├── nginx-svc.yaml │ ├── tomcat-svc.yaml │ ├── nginx-ing.yaml │ ├── tomcat-ing.yaml │ ├── filebeat-config.yaml │ ├── nginx-deploy.yaml │ └── tomcat-deploy.yaml ├── kibana │ ├── kibana-svc.yaml 
│ ├── kibana-ing.yaml │ └── kibana-deploy.yaml ├── logstash │ ├── logstash-svc.yaml │ ├── logstash-cm.yaml │ └── logstash-deploy.yaml ├── elasticsearch │ ├── elasticsearch-svc.yaml │ └── elasticsearch-sts.yaml └── README.md ├── kafka ├── public-service-ns.yaml ├── kafka-pdb.yaml ├── zookeeper │ ├── zookeeper-pdb.yaml │ ├── zookeeper-cm.yaml │ └── zookeeper-svc.yaml ├── kafka-svc.yaml └── Dockerfile ├── redis ├── public-service-ns.yaml ├── redis.conf ├── redis-svc.yaml ├── redis-pv.yaml └── redis-sts.yaml ├── apollo ├── public-service-ns.yaml ├── apollo-adminservice │ ├── apollo-adminservice-svc.yaml │ ├── apollo-adminservice-cm.yaml │ └── apollo-adminservice-deploy.yaml ├── apollo-configservice │ ├── apollo-configservice-svc.yaml │ ├── apollo-configservice-cm.yaml │ └── apollo-configservice-deploy.yaml ├── apollo-portal │ ├── apollo-portal-svc.yaml │ ├── apollo-portal-ing.yaml │ ├── apollo-portal-cm.yaml │ └── apollo-portal-deploy.yaml └── README.md ├── consul ├── public-service-ns.yaml ├── client │ ├── consul-client-cm.yaml │ └── consul-client-ds.yaml ├── server │ ├── consul-server-cm.yaml │ ├── consul-server-pdb.yaml │ ├── consul-ui-svc.yaml │ ├── consul-ui-ing.yaml │ ├── consul-dns-svc.yaml │ ├── consul-server-svc.yaml │ └── consul-server-sts.yaml └── README.md ├── prometheus ├── public-service-ns.yaml ├── grafana │ ├── grafana-secret.yaml │ ├── grafana-ing.yaml │ ├── grafana-svc.yaml │ └── grafana-deploy.yaml ├── prometheus │ ├── prometheus-ing.yaml │ ├── prometheus-svc.yaml │ ├── prometheus-rbac.yaml │ ├── prometheus-deploy.yaml │ └── rules.yaml ├── alertmanager │ ├── alertmanager-ing.yaml │ ├── alertmanager-svc.yaml │ ├── alertmanager-cm.yaml │ └── alertmanager-deploy.yaml ├── dingtalk │ ├── dingtalk-cm.yaml │ ├── dingtalk-svc.yaml │ └── dingtalk-deploy.yaml ├── node-exporter │ ├── node-exporter-svc.yaml │ └── node-exporter-ds.yaml ├── k8s-components │ ├── kube-proxy-prometheus-discovery.yaml │ ├── kube-scheduler-prometheus-discovery.yaml │ └── 
kube-controller-manager-prometheus-discovery.yaml ├── blackbox-exporter │ ├── blackbox-exporter-svc.yaml │ ├── blackbox-exporter-cm.yaml │ └── blackbox-exporter-deploy.yaml └── kube-state-metrics │ ├── kube-state-metrics-svc.yaml │ ├── kube-state-metrics-deploy.yaml │ └── kube-state-metrics-rbac.yaml ├── rabbitmq ├── public-service-ns.yaml ├── rabbitmq-secret.yaml ├── rabbitmq-pvc.yaml ├── rabbitmq-sc.yaml ├── rabbitmq-ing.yaml ├── rabbitmq-svc.yaml ├── rabbitmq-rbac.yaml ├── README.md └── rabbitmq-cm.yaml ├── zookeeper ├── public-service-ns.yaml ├── zookeeper-pdb.yaml ├── zookeeper-cm.yaml └── zookeeper-svc.yaml ├── gitlab-runner ├── img │ ├── runner-1.png │ └── runner-2.png ├── public-service-ns.yaml ├── gitlab-runner-secret.yaml ├── gitlab-runner-rbac.yaml ├── README.md └── gitlab-runner-cm.yaml └── LICENSE /README.md: -------------------------------------------------------------------------------- 1 | ## Kubernetes 2 | Kubernetes manifests, deploy common software in kubernetes. 3 | -------------------------------------------------------------------------------- /elfk+xpack/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: log 5 | -------------------------------------------------------------------------------- /weave-scope/weave-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: weave 5 | -------------------------------------------------------------------------------- /zabbix/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /gitlab/img/gitlab-1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/gitlab/img/gitlab-1.png -------------------------------------------------------------------------------- /gitlab/img/gitlab-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/gitlab/img/gitlab-2.png -------------------------------------------------------------------------------- /harbor/img/harbor-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/harbor/img/harbor-1.png -------------------------------------------------------------------------------- /harbor/img/harbor-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/harbor/img/harbor-2.png -------------------------------------------------------------------------------- /harbor/img/harbor-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/harbor/img/harbor-3.png -------------------------------------------------------------------------------- /zabbix/img/zabbix-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/zabbix/img/zabbix-1.png -------------------------------------------------------------------------------- /zabbix/img/zabbix-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/zabbix/img/zabbix-2.png -------------------------------------------------------------------------------- /zabbix/img/zabbix-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/zabbix/img/zabbix-3.png 
-------------------------------------------------------------------------------- /zabbix/img/zabbix-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/zabbix/img/zabbix-4.png -------------------------------------------------------------------------------- /elfk+xpack/img/elfk-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/img/elfk-1.png -------------------------------------------------------------------------------- /elfk+xpack/img/elfk-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/img/elfk-2.png -------------------------------------------------------------------------------- /elfk+xpack/img/elfk-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/img/elfk-3.png -------------------------------------------------------------------------------- /elfk+xpack/img/elfk-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/img/elfk-4.png -------------------------------------------------------------------------------- /elfk+xpack/img/elfk-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/img/elfk-5.png -------------------------------------------------------------------------------- /elfk+xpack/img/elfk-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/img/elfk-6.png 
-------------------------------------------------------------------------------- /elfk/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /harbor/harbor/.helmignore: -------------------------------------------------------------------------------- 1 | docs/* 2 | .git/* 3 | .gitignore 4 | CONTRIBUTING.md 5 | .travis.yaml 6 | test/* -------------------------------------------------------------------------------- /kafka/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /redis/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /apollo/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /consul/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /gitlab/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | 
-------------------------------------------------------------------------------- /harbor/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /prometheus/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /rabbitmq/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /zookeeper/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | -------------------------------------------------------------------------------- /gitlab-runner/img/runner-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/gitlab-runner/img/runner-1.png -------------------------------------------------------------------------------- /gitlab-runner/img/runner-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/gitlab-runner/img/runner-2.png -------------------------------------------------------------------------------- /gitlab-runner/public-service-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: public-service 5 | 
-------------------------------------------------------------------------------- /elfk+xpack/elasticsearch/elastic-certificates.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tobewont/kubernetes/HEAD/elfk+xpack/elasticsearch/elastic-certificates.p12 -------------------------------------------------------------------------------- /consul/client/consul-client-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: consul-client-config 5 | namespace: public-service 6 | data: 7 | -------------------------------------------------------------------------------- /consul/server/consul-server-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: consul-server-config 5 | namespace: public-service 6 | data: 7 | -------------------------------------------------------------------------------- /redis/redis.conf: -------------------------------------------------------------------------------- 1 | appendonly yes 2 | cluster-enabled yes 3 | cluster-config-file /var/lib/redis/nodes.conf 4 | cluster-node-timeout 5000 5 | dir /var/lib/redis 6 | port 6379 7 | -------------------------------------------------------------------------------- /kafka/kafka-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: kafka-pdb 5 | namespace: public-service 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: kafka 10 | minAvailable: 2 11 | -------------------------------------------------------------------------------- /zookeeper/zookeeper-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: 
zk-pdb 5 | namespace: public-service 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: zk 10 | minAvailable: 2 11 | -------------------------------------------------------------------------------- /harbor/harbor/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Please wait for several minutes for Harbor deployment to complete. 2 | Then you should be able to visit the Harbor portal at {{ .Values.externalURL }} 3 | For more details, please visit https://github.com/goharbor/harbor 4 | -------------------------------------------------------------------------------- /kafka/zookeeper/zookeeper-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: zk-pdb 5 | namespace: public-service 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: zk 10 | minAvailable: 2 11 | -------------------------------------------------------------------------------- /rabbitmq/rabbitmq-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: rmq-cluster-secret 5 | namespace: public-service 6 | stringData: 7 | cookie: ERLANG_COOKIE 8 | username: admin 9 | password: admin123 10 | type: Opaque 11 | -------------------------------------------------------------------------------- /elfk/filebeat/nginx-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx 5 | namespace: default 6 | labels: 7 | app: nginx 8 | spec: 9 | selector: 10 | app: nginx 11 | ports: 12 | - port: 80 13 | targetPort: 80 14 | -------------------------------------------------------------------------------- /prometheus/grafana/grafana-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | 
metadata: 4 | name: grafana 5 | namespace: public-service 6 | data: 7 | admin-password: YWRtaW4= # base64 加解密 8 | admin-username: YWRtaW4= 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /elfk/filebeat/tomcat-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: tomcat 5 | namespace: default 6 | labels: 7 | app: tomcat 8 | spec: 9 | selector: 10 | app: tomcat 11 | ports: 12 | - port: 8080 13 | targetPort: 8080 14 | -------------------------------------------------------------------------------- /kafka/kafka-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka 5 | namespace: public-service 6 | labels: 7 | app: kafka 8 | spec: 9 | selector: 10 | app: kafka 11 | ports: 12 | - port: 9092 13 | name: server 14 | clusterIP: None 15 | -------------------------------------------------------------------------------- /consul/server/consul-server-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: consul-server 5 | namespace: public-service 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: consul 10 | component: server 11 | minAvailable: 2 12 | -------------------------------------------------------------------------------- /elfk/kibana/kibana-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana 5 | namespace: public-service 6 | labels: 7 | app: kibana 8 | spec: 9 | selector: 10 | app: kibana 11 | ports: 12 | - port: 5601 13 | protocol: TCP 14 | targetPort: 5601 15 | -------------------------------------------------------------------------------- /gitlab/upgrade-check/chart-info-cm.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-chart-info 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab 9 | data: 10 | gitlabVersion: "13.4.3" 11 | gitlabChartVersion: "4.4.3" 12 | -------------------------------------------------------------------------------- /zabbix/nfs-mysql-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mysql-pv 5 | labels: 6 | pvname: nfs-mysql-pv 7 | spec: 8 | capacity: 9 | storage: 20Gi 10 | accessModes: 11 | - ReadWriteMany 12 | nfs: 13 | server: 192.168.30.129 14 | path: /data/mysql 15 | -------------------------------------------------------------------------------- /elfk/logstash/logstash-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: logstash 5 | namespace: public-service 6 | spec: 7 | selector: 8 | app: logstash 9 | ports: 10 | - port: 5044 11 | protocol: TCP 12 | targetPort: 5044 13 | clusterIP: 10.96.103.207 #指定clusterIP,方便使用 14 | -------------------------------------------------------------------------------- /rabbitmq/rabbitmq-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: rabbitmq-cluster-storage 5 | namespace: public-service 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | storageClassName: alicloud-nas-subpath-public 13 | -------------------------------------------------------------------------------- /consul/server/consul-ui-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: consul-ui 5 | namespace: 
public-service 6 | labels: 7 | app: consul 8 | component: server 9 | spec: 10 | selector: 11 | app: consul 12 | ports: 13 | - name: http 14 | port: 80 15 | targetPort: 8500 16 | -------------------------------------------------------------------------------- /gitlab/gitaly/gitaly-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: gitlab-gitaly 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitaly 9 | spec: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | app: gitlab 14 | component: gitaly 15 | -------------------------------------------------------------------------------- /gitlab/minio/minio-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: gitlab-minio-v1 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: minio 9 | spec: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | app: gitlab 14 | component: minio 15 | -------------------------------------------------------------------------------- /zookeeper/zookeeper-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: zk-config 5 | namespace: public-service 6 | data: 7 | ensemble: "zk-0;zk-1;zk-2" 8 | replicas: "3" 9 | jvm.heap: "512M" 10 | tick: "2000" 11 | init: "10" 12 | sync: "5" 13 | client.cnxns: "60" 14 | snap.retain: "3" 15 | purge.interval: "1" 16 | -------------------------------------------------------------------------------- /elfk/filebeat/nginx-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: nginx 5 | namespace: default 6 | spec: 7 | rules: 8 | - host: nginx.lzxlinux.com 9 | 
http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: nginx 14 | servicePort: 80 15 | -------------------------------------------------------------------------------- /gitlab/shell/shell-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: gitlab-shell 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab-shell 9 | spec: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | app: gitlab 14 | component: gitlab-shell 15 | -------------------------------------------------------------------------------- /kafka/zookeeper/zookeeper-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: zk-config 5 | namespace: public-service 6 | data: 7 | ensemble: "zk-0;zk-1;zk-2" 8 | replicas: "3" 9 | jvm.heap: "512M" 10 | tick: "2000" 11 | init: "10" 12 | sync: "5" 13 | client.cnxns: "60" 14 | snap.retain: "3" 15 | purge.interval: "1" 16 | -------------------------------------------------------------------------------- /prometheus/grafana/grafana-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: grafana 5 | namespace: public-service 6 | spec: 7 | rules: 8 | - host: grafana.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: grafana 14 | servicePort: 3000 15 | -------------------------------------------------------------------------------- /elfk/filebeat/tomcat-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: tomcat 5 | namespace: default 6 | spec: 7 | rules: 8 | - host: tomcat.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: tomcat 
14 | servicePort: 8080 15 | -------------------------------------------------------------------------------- /gitlab/registry/registry-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: gitlab-registry-v1 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: registry 9 | spec: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | app: gitlab 14 | component: registry 15 | -------------------------------------------------------------------------------- /elfk/elasticsearch/elasticsearch-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch 5 | namespace: public-service 6 | labels: 7 | app: elasticsearch 8 | spec: 9 | selector: 10 | app: elasticsearch 11 | ports: 12 | - name: api 13 | port: 9200 14 | - name: discovery 15 | port: 9300 16 | clusterIP: None 17 | -------------------------------------------------------------------------------- /elfk/kibana/kibana-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: kibana 5 | namespace: public-service 6 | spec: 7 | rules: 8 | - host: kibana.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: kibana 14 | servicePort: 5601 15 | -------------------------------------------------------------------------------- /gitlab/webservice/webservice-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: gitlab-webservice 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: webservice 9 | spec: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | app: gitlab 14 | component: webservice 15 | 
-------------------------------------------------------------------------------- /rabbitmq/rabbitmq-sc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: alicloud-nas-subpath-public 5 | provisioner: nasplugin.csi.alibabacloud.com 6 | mountOptions: 7 | - nolock,tcp,noresvport 8 | - vers=4 9 | parameters: 10 | volumeAs: subpath 11 | server: "xxxxxx.cn-hangzhou.nas.aliyuncs.com:/" 12 | reclaimPolicy: Retain 13 | -------------------------------------------------------------------------------- /consul/server/consul-ui-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: consul 5 | namespace: public-service 6 | spec: 7 | rules: 8 | - host: consul.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: consul-ui 14 | servicePort: 80 15 | -------------------------------------------------------------------------------- /gitlab/sidekiq/sidekiq-allin1-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: gitlab-sidekiq-all-in-1-v1 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: sidekiq 9 | spec: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | app: gitlab 14 | component: sidekiq 15 | -------------------------------------------------------------------------------- /prometheus/prometheus/prometheus-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: prometheus 5 | namespace: public-service 6 | spec: 7 | rules: 8 | - host: prometheus.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: prometheus 14 | servicePort: 9090 15 | 
-------------------------------------------------------------------------------- /rabbitmq/rabbitmq-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: rabbitmq 5 | namespace: public-service 6 | spec: 7 | rules: 8 | - host: rabbitmq.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: rmq-cluster 14 | servicePort: 15672 15 | -------------------------------------------------------------------------------- /prometheus/alertmanager/alertmanager-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: alertmanager 5 | namespace: public-service 6 | spec: 7 | rules: 8 | - host: alertmanager.lzxlinux.com 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: alertmanager 14 | servicePort: 9093 15 | -------------------------------------------------------------------------------- /apollo/apollo-adminservice/apollo-adminservice-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: apollo-adminservice 5 | namespace: public-service 6 | labels: 7 | app: apollo-adminservice 8 | spec: 9 | selector: 10 | app: apollo-adminservice 11 | ports: 12 | - name: http 13 | protocol: TCP 14 | port: 8090 15 | targetPort: 8090 16 | -------------------------------------------------------------------------------- /harbor/harbor/templates/database/database-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.database.type "internal" -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: "{{ template "harbor.database" . }}" 6 | labels: 7 | {{ include "harbor.labels" . 
| indent 4 }} 8 | type: Opaque 9 | data: 10 | POSTGRES_PASSWORD: {{ template "harbor.database.encryptedPassword" . }} 11 | {{- end -}} 12 | -------------------------------------------------------------------------------- /weave-scope/manifests/weave-scope-app-ing.yaml: -------------------------------------------------------------------------------- 1 | # networking.k8s.io/v1 replaces extensions/v1beta1, which was removed in Kubernetes 1.22 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: weave-scope 6 | namespace: weave 7 | spec: 8 | rules: 9 | - host: scope.lzxlinux.com 10 | http: 11 | paths: 12 | - path: / 13 | pathType: Prefix 14 | backend: 15 | service: 16 | name: weave-scope-app 17 | port: 18 | number: 80 19 | -------------------------------------------------------------------------------- /apollo/apollo-configservice/apollo-configservice-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: apollo-configservice 5 | namespace: public-service 6 | labels: 7 | app: apollo-configservice 8 | spec: 9 | selector: 10 | app: apollo-configservice 11 | ports: 12 | - name: http 13 | protocol: TCP 14 | port: 8080 15 | targetPort: 8080 16 | -------------------------------------------------------------------------------- /apollo/apollo-portal/apollo-portal-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: apollo-portal 5 | namespace: public-service 6 | labels: 7 | app: apollo-portal 8 | spec: 9 | selector: 10 | app: apollo-portal 11 | sessionAffinity: ClientIP 12 | ports: 13 | - name: http 14 | protocol: TCP 15 | port: 8070 16 | targetPort: 8070 17 | -------------------------------------------------------------------------------- /prometheus/dingtalk/dingtalk-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: dingtalk-config 5 | namespace: public-service 6 | data: 7 | 
config.yml: |- 8 | targets: 9 | webhook: 10 | url: https://oapi.dingtalk.com/robot/send?access_token=xxxxxxxxxxxx #修改为钉钉机器人的webhook 11 | mention: 12 | all: true #@所有人 13 | -------------------------------------------------------------------------------- /prometheus/dingtalk/dingtalk-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: dingtalk 5 | namespace: public-service 6 | labels: 7 | app: dingtalk 8 | annotations: 9 | prometheus.io/scrape: 'false' 10 | spec: 11 | selector: 12 | app: dingtalk 13 | ports: 14 | - name: dingtalk 15 | port: 8060 16 | protocol: TCP 17 | targetPort: 8060 18 | -------------------------------------------------------------------------------- /gitlab-runner/gitlab-runner-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: gitlab-runner 5 | namespace: public-service 6 | labels: 7 | app: gitlab-runner 8 | type: Opaque 9 | data: 10 | runner-registration-token: "SHR5WTFBRW9JNUtzUmpKek9aQUExNGVrVUo4YUppQTFDYzA0OWNBWkMwTURGWTVFeU4wa09jdmJUd25DRzdzbA==" # registration token, base64-encoded. SECURITY(review): this looks like a real credential committed to the repo — rotate it and source it from a vault/SealedSecret instead 11 | runner-token: "" # left empty; presumably filled in by the runner after registration — TODO confirm 12 | -------------------------------------------------------------------------------- /harbor/harbor/templates/redis/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.redis.type "internal" -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "harbor.redis" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - port: 6379 11 | selector: 12 | {{ include "harbor.matchLabels" . 
| indent 4 }} 13 | component: redis 14 | {{- end -}} -------------------------------------------------------------------------------- /gitlab/redis/redis-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-redis-master 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: redis 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - name: redis 13 | port: 6379 14 | targetPort: redis 15 | selector: 16 | app: gitlab 17 | component: redis 18 | role: master 19 | -------------------------------------------------------------------------------- /prometheus/prometheus/prometheus-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: public-service 6 | labels: 7 | app: prometheus 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | app: prometheus 13 | ports: 14 | - name: prometheus 15 | port: 9090 16 | protocol: TCP 17 | targetPort: 9090 18 | -------------------------------------------------------------------------------- /gitlab/gitaly/gitaly-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-gitaly 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitaly 9 | spec: 10 | type: ClusterIP 11 | clusterIP: None 12 | ports: 13 | - port: 8075 14 | name: gitaly 15 | targetPort: 8075 16 | selector: 17 | app: gitlab 18 | component: gitaly 19 | -------------------------------------------------------------------------------- /apollo/apollo-adminservice/apollo-adminservice-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: apollo-adminservice 5 | namespace: public-service 6 | data: 7 | 
application-github.properties: | 8 | spring.datasource.url = jdbc:mysql://192.168.30.131:3306/ApolloConfigDB?characterEncoding=utf8 9 | spring.datasource.username = root 10 | spring.datasource.password = 123456789 11 | -------------------------------------------------------------------------------- /gitlab/minio/minio-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-minio-svc 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: minio 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - name: service 13 | port: 9000 14 | targetPort: 9000 15 | protocol: TCP 16 | selector: 17 | app: gitlab 18 | component: minio 19 | -------------------------------------------------------------------------------- /gitlab/registry/registry-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-registry 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: registry 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - port: 5000 13 | targetPort: 5000 14 | protocol: TCP 15 | name: registry 16 | selector: 17 | app: gitlab 18 | component: registry 19 | -------------------------------------------------------------------------------- /gitlab/shell/shell-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-shell 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab-shell 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - port: 22 13 | targetPort: 2222 14 | protocol: TCP 15 | name: ssh 16 | selector: 17 | app: gitlab 18 | component: gitlab-shell 19 | -------------------------------------------------------------------------------- /gitlab/redis/redis-headless-svc.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-redis-headless 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: redis 9 | spec: 10 | type: ClusterIP 11 | clusterIP: None 12 | ports: 13 | - name: redis 14 | port: 6379 15 | targetPort: redis 16 | selector: 17 | app: gitlab 18 | component: redis 19 | -------------------------------------------------------------------------------- /harbor/harbor/templates/database/database-svc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.database.type "internal" -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: "{{ template "harbor.database" . }}" 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - port: 5432 11 | selector: 12 | {{ include "harbor.matchLabels" . | indent 4 }} 13 | component: database 14 | {{- end -}} -------------------------------------------------------------------------------- /prometheus/alertmanager/alertmanager-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: alertmanager 5 | namespace: public-service 6 | labels: 7 | name: alertmanager 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | app: alertmanager 13 | ports: 14 | - name: alertmanager 15 | port: 9093 16 | protocol: TCP 17 | targetPort: 9093 18 | -------------------------------------------------------------------------------- /elfk+xpack/kibana/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kibana-config 5 | namespace: log 6 | data: 7 | kibana.yml: | 8 | server.port: 5601 9 | server.host: "0" 10 | kibana.index: ".kibana" 11 | elasticsearch.hosts: ["http://elasticsearch:9200"] 12 | 
elasticsearch.username: kibana_system 13 | elasticsearch.password: elk-2021 14 | i18n.locale: "zh-CN" 15 | -------------------------------------------------------------------------------- /gitlab/minio/minio-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: gitlab-minio 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: minio 9 | spec: 10 | accessModes: 11 | - "ReadWriteOnce" 12 | resources: 13 | requests: 14 | storage: "10Gi" 15 | selector: 16 | matchLabels: 17 | app: gitlab 18 | component: minio 19 | -------------------------------------------------------------------------------- /prometheus/grafana/grafana-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: public-service 6 | labels: 7 | app: grafana 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | prometheus.io/path: '/metrics' 11 | spec: 12 | selector: 13 | app: grafana 14 | ports: 15 | - name: grafana 16 | port: 3000 17 | protocol: TCP 18 | targetPort: 3000 19 | -------------------------------------------------------------------------------- /gitlab/sidekiq/sidekiq-allin1-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-sidekiq-all-in-1 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: sidekiq 9 | queue_pod_name: all-in-1 10 | data: 11 | sidekiq_queues.yml.erb: | 12 | <%= 13 | sq = YAML.load_file('/srv/gitlab/config/sidekiq_queues.yml') 14 | 15 | sq.to_yaml 16 | %> 17 | -------------------------------------------------------------------------------- /harbor/harbor/templates/trivy/trivy-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.trivy.enabled }} 2 
| apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "harbor.trivy" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | type: Opaque 9 | data: 10 | redisURL: {{ include "harbor.redis.urlForTrivy" . | b64enc }} 11 | gitHubToken: {{ .Values.trivy.gitHubToken | default "" | b64enc | quote }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /apollo/apollo-portal/apollo-portal-ing.yaml: -------------------------------------------------------------------------------- 1 | # networking.k8s.io/v1 replaces extensions/v1beta1, which was removed in Kubernetes 1.22 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: apollo-portal 6 | namespace: public-service 7 | labels: 8 | app: apollo-portal 9 | spec: 10 | rules: 11 | - host: apollo.lzxlinux.com 12 | http: 13 | paths: 14 | - path: / 15 | pathType: Prefix 16 | backend: 17 | service: 18 | name: apollo-portal 19 | port: 20 | number: 8070 21 | -------------------------------------------------------------------------------- /gitlab/minio/minio-ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: gitlab-minio 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: minio 9 | spec: 10 | rules: 11 | - host: minio.lzxlinux.com 12 | http: 13 | paths: 14 | - path: / 15 | pathType: Prefix 16 | backend: 17 | service: 18 | name: gitlab-minio-svc 19 | port: 20 | number: 9000 21 | -------------------------------------------------------------------------------- /gitlab/postgresql/postgresql-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-postgresql 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: postgresql 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - name: tcp-postgresql 13 | port: 5432 14 | targetPort: tcp-postgresql 15 | selector: 16 | app: gitlab 17 | component: postgresql 18 | role: master 19 | 
-------------------------------------------------------------------------------- /harbor/harbor/conf/notary-signer.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": { 3 | "grpc_addr": ":7899", 4 | "tls_cert_file": "/etc/ssl/notary/tls.crt", 5 | "tls_key_file": "/etc/ssl/notary/tls.key" 6 | }, 7 | "logging": { 8 | "level": "{{ .Values.logLevel }}" 9 | }, 10 | "storage": { 11 | "backend": "postgres", 12 | "db_url": "{{ template "harbor.database.notarySigner" . }}", 13 | "default_alias": "defaultalias" 14 | } 15 | } -------------------------------------------------------------------------------- /harbor/harbor/templates/jobservice/jobservice-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "{{ template "harbor.jobservice" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | JOBSERVICE_SECRET: {{ .Values.jobservice.secret | default (randAlphaNum 16) | b64enc | quote }} 10 | REGISTRY_CREDENTIAL_PASSWORD: {{ .Values.registry.credentials.password | b64enc | quote }} 11 | -------------------------------------------------------------------------------- /prometheus/node-exporter/node-exporter-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: node-exporter 5 | namespace: public-service 6 | labels: 7 | app: node-exporter 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | app: node-exporter 13 | ports: 14 | - name: node-exporter 15 | port: 9100 16 | protocol: TCP 17 | targetPort: 9100 18 | clusterIP: None 19 | -------------------------------------------------------------------------------- /harbor/harbor/templates/portal/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
Service 3 | metadata: 4 | name: "{{ template "harbor.portal" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | spec: 8 | ports: 9 | - port: {{ template "harbor.portal.servicePort" . }} 10 | targetPort: {{ template "harbor.portal.containerPort" . }} 11 | selector: 12 | {{ include "harbor.matchLabels" . | indent 4 }} 13 | component: portal 14 | -------------------------------------------------------------------------------- /gitlab/registry/registry-ing.yaml: -------------------------------------------------------------------------------- 1 | # networking.k8s.io/v1 replaces extensions/v1beta1, which was removed in Kubernetes 1.22 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: gitlab-registry 6 | namespace: public-service 7 | labels: 8 | app: gitlab 9 | component: registry 10 | spec: 11 | rules: 12 | - host: registry.lzxlinux.com 13 | http: 14 | paths: 15 | - path: / 16 | pathType: Prefix 17 | backend: 18 | service: 19 | name: gitlab-registry 20 | port: 21 | number: 5000 22 | -------------------------------------------------------------------------------- /consul/server/consul-dns-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: consul-dns 5 | namespace: public-service 6 | labels: 7 | app: consul 8 | component: dns 9 | spec: 10 | selector: 11 | app: consul 12 | ports: 13 | - name: dns-tcp 14 | protocol: TCP 15 | port: 53 16 | targetPort: dns-tcp 17 | - name: dns-udp 18 | protocol: UDP 19 | port: 53 20 | targetPort: dns-udp 21 | -------------------------------------------------------------------------------- /gitlab/postgresql/postgresql-headless-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-postgresql-headless 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: postgresql 9 | spec: 10 | type: ClusterIP 11 | clusterIP: None 12 | ports: 13 | - name: tcp-postgresql 14 | port: 5432 15 | targetPort: tcp-postgresql 16 | 
selector: 17 | app: gitlab 18 | component: postgresql 19 | -------------------------------------------------------------------------------- /harbor/harbor/templates/clair/clair-svc.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.clair.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: "{{ template "harbor.clair" . }}" 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - name: adapter 11 | port: {{ include "harbor.clairAdapter.servicePort" . }} 12 | selector: 13 | {{ include "harbor.matchLabels" . | indent 4 }} 14 | component: clair 15 | {{ end }} 16 | -------------------------------------------------------------------------------- /prometheus/k8s-components/kube-proxy-prometheus-discovery.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-proxy-prometheus-discovery 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-proxy 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | k8s-app: kube-proxy 13 | ports: 14 | - name: http-metrics 15 | port: 10249 16 | protocol: TCP 17 | targetPort: 10249 18 | clusterIP: None 19 | -------------------------------------------------------------------------------- /harbor/harbor/templates/clair/clair-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.clair.enabled }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "harbor.clair" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | type: Opaque 9 | data: 10 | config.yaml: {{ tpl (.Files.Get "conf/clair.yaml") . | b64enc }} 11 | redis: {{ include "harbor.redis.urlForClair" . | b64enc }} 12 | database: {{ include "harbor.database.clair" . 
| b64enc }} 13 | {{- end }} -------------------------------------------------------------------------------- /harbor/harbor/templates/jobservice/jobservice-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: "{{ template "harbor.jobservice" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | spec: 8 | ports: 9 | - port: {{ template "harbor.jobservice.servicePort" . }} 10 | targetPort: {{ template "harbor.jobservice.containerPort" . }} 11 | selector: 12 | {{ include "harbor.matchLabels" . | indent 4 }} 13 | component: jobservice 14 | -------------------------------------------------------------------------------- /prometheus/blackbox-exporter/blackbox-exporter-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: blackbox-exporter 5 | namespace: public-service 6 | labels: 7 | app: blackbox-exporter 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | app: blackbox-exporter 13 | ports: 14 | - name: blackbox 15 | port: 9115 16 | protocol: TCP 17 | targetPort: 9115 18 | nodePort: 30115 19 | type: NodePort 20 | -------------------------------------------------------------------------------- /rabbitmq/rabbitmq-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rmq-cluster 5 | namespace: public-service 6 | labels: 7 | app: rmq-cluster 8 | spec: 9 | selector: 10 | app: rmq-cluster 11 | ports: 12 | - name: http 13 | port: 15672 14 | protocol: TCP 15 | targetPort: 15672 16 | - name: amqp 17 | port: 5672 18 | protocol: TCP 19 | targetPort: 5672 20 | clusterIP: 172.21.11.245 #指定clusterIP,方便使用 21 | -------------------------------------------------------------------------------- /harbor/harbor/templates/trivy/trivy-svc.yaml: 
-------------------------------------------------------------------------------- 1 | {{ if .Values.trivy.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: "{{ template "harbor.trivy" . }}" 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - name: api-server 11 | protocol: TCP 12 | port: {{ template "harbor.trivy.servicePort" . }} 13 | selector: 14 | {{ include "harbor.matchLabels" . | indent 4 }} 15 | component: trivy 16 | {{ end }} 17 | -------------------------------------------------------------------------------- /prometheus/k8s-components/kube-scheduler-prometheus-discovery.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-scheduler-prometheus-discovery 5 | namespace: kube-system 6 | labels: 7 | component: kube-scheduler 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | component: kube-scheduler 13 | ports: 14 | - name: http-metrics 15 | port: 10251 16 | protocol: TCP 17 | targetPort: 10251 18 | clusterIP: None 19 | -------------------------------------------------------------------------------- /harbor/harbor/templates/registry/registry-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: "{{ template "harbor.registry" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | spec: 8 | ports: 9 | - name: registry 10 | port: {{ template "harbor.registry.servicePort" . }} 11 | - name: controller 12 | port: {{ template "harbor.registryctl.servicePort" . }} 13 | selector: 14 | {{ include "harbor.matchLabels" . 
| indent 4 }} 15 | component: registry -------------------------------------------------------------------------------- /apollo/apollo-portal/apollo-portal-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: apollo-portal 5 | namespace: public-service 6 | data: 7 | application-github.properties: | 8 | spring.datasource.url = jdbc:mysql://192.168.30.131:3306/ApolloPortalDB?characterEncoding=utf8 9 | spring.datasource.username = root 10 | spring.datasource.password = 123456789 11 | apollo.portal.envs = dev 12 | apollo-env.properties: | 13 | dev.meta = http://apollo-configservice:8080 14 | -------------------------------------------------------------------------------- /prometheus/k8s-components/kube-controller-manager-prometheus-discovery.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-controller-manager-prometheus-discovery 5 | namespace: kube-system 6 | labels: 7 | component: kube-controller-manager 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | spec: 11 | selector: 12 | component: kube-controller-manager 13 | ports: 14 | - name: http-metrics 15 | port: 10252 16 | targetPort: 10252 17 | protocol: TCP 18 | clusterIP: None 19 | -------------------------------------------------------------------------------- /harbor/harbor/templates/chartmuseum/chartmuseum-svc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.chartmuseum.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: "{{ template "harbor.chartmuseum" . }}" 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - port: {{ template "harbor.chartmuseum.servicePort" . }} 11 | targetPort: {{ template "harbor.chartmuseum.containerPort" . }} 12 | selector: 13 | {{ include "harbor.matchLabels" . 
| indent 4 }} 14 | component: chartmuseum 15 | {{- end }} -------------------------------------------------------------------------------- /harbor/harbor/templates/core/core-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "harbor.core" . }} 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | spec: 8 | {{- if (eq .Values.expose.ingress.controller "gce") }} 9 | type: NodePort 10 | {{- end }} 11 | ports: 12 | - port: {{ template "harbor.core.servicePort" . }} 13 | targetPort: {{ template "harbor.core.containerPort" . }} 14 | selector: 15 | {{ include "harbor.matchLabels" . | indent 4 }} 16 | component: core 17 | -------------------------------------------------------------------------------- /gitlab/redis/redis-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-redis 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: redis 9 | data: 10 | redis.conf: |- 11 | appendonly yes 12 | save "" 13 | 14 | master.conf: |- 15 | dir /data 16 | rename-command FLUSHDB "" 17 | rename-command FLUSHALL "" 18 | 19 | replica.conf: |- 20 | dir /data 21 | slave-read-only yes 22 | rename-command FLUSHDB "" 23 | rename-command FLUSHALL "" 24 | -------------------------------------------------------------------------------- /gitlab/webservice/webservice-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitlab-webservice 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: webservice 9 | spec: 10 | type: ClusterIP 11 | ports: 12 | - port: 8080 13 | targetPort: 8080 14 | protocol: TCP 15 | name: http-webservice 16 | - port: 8181 17 | targetPort: 8181 18 | protocol: TCP 19 | name: http-workhorse 20 | selector: 21 | app: 
gitlab 22 | component: webservice 23 | -------------------------------------------------------------------------------- /weave-scope/manifests/weave-scope-app-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: weave-scope-app 5 | namespace: weave 6 | labels: 7 | name: weave-scope-app 8 | app: weave-scope 9 | weave-cloud-component: scope 10 | weave-scope-component: app 11 | spec: 12 | ports: 13 | - name: app 14 | port: 80 15 | protocol: TCP 16 | targetPort: 4040 17 | selector: 18 | name: weave-scope-app 19 | app: weave-scope 20 | weave-cloud-component: scope 21 | weave-scope-component: app 22 | -------------------------------------------------------------------------------- /apollo/apollo-configservice/apollo-configservice-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: apollo-configservice 5 | namespace: public-service 6 | data: 7 | application-github.properties: | 8 | spring.datasource.url = jdbc:mysql://192.168.30.131:3306/ApolloConfigDB?characterEncoding=utf8 9 | spring.datasource.username = root 10 | spring.datasource.password = 123456789 11 | apollo.config-service.url = http://apollo-configservice.public-service:8080 12 | apollo.admin-service.url = http://apollo-adminservice.public-service:8090 13 | -------------------------------------------------------------------------------- /zookeeper/zookeeper-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zk-hs 5 | namespace: public-service 6 | labels: 7 | app: zk 8 | spec: 9 | selector: 10 | app: zk 11 | ports: 12 | - port: 2888 13 | name: server 14 | - port: 3888 15 | name: leader-election 16 | clusterIP: None 17 | 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: zk-cs 23 | namespace: 
public-service 24 | labels: 25 | app: zk 26 | spec: 27 | selector: 28 | app: zk 29 | ports: 30 | - port: 2181 31 | name: client 32 | -------------------------------------------------------------------------------- /kafka/zookeeper/zookeeper-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zk-hs 5 | namespace: public-service 6 | labels: 7 | app: zk 8 | spec: 9 | selector: 10 | app: zk 11 | ports: 12 | - port: 2888 13 | name: server 14 | - port: 3888 15 | name: leader-election 16 | clusterIP: None 17 | 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: zk-cs 23 | namespace: public-service 24 | labels: 25 | app: zk 26 | spec: 27 | selector: 28 | app: zk 29 | ports: 30 | - port: 2181 31 | name: client 32 | -------------------------------------------------------------------------------- /prometheus/kube-state-metrics/kube-state-metrics-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: public-service 6 | labels: 7 | app: kube-state-metrics 8 | annotations: 9 | prometheus.io/scrape: 'true' 10 | prometheus.io/http-probe: 'true' 11 | prometheus.io/http-probe-path: '/healthz' 12 | prometheus.io/http-probe-port: '8080' 13 | spec: 14 | selector: 15 | app: kube-state-metrics 16 | ports: 17 | - name: kube-state-metrics 18 | port: 8080 19 | protocol: TCP 20 | targetPort: 8080 21 | -------------------------------------------------------------------------------- /elfk/README.md: -------------------------------------------------------------------------------- 1 | ### elfk 2 | 3 | - 部署: 4 | 5 | ```bash 6 | kubectl apply -f public-service-ns.yaml 7 | 8 | kubectl apply -f elasticsearch/ 9 | 10 | kubectl apply -f kibana/ 11 | 12 | kubectl apply -f logstash/ 13 | ``` 14 | 15 | filebeat以sidecar方式部署,每个应用的pod中包含filebeat容器。 16 | 17 | 
filebeat收集不同应用的日志时,ConfigMap应该独立,避免出错。 18 | 19 | - 示例: 20 | 21 | 此处以收集nginx和tomcat日志示例, 22 | 23 | ```bash 24 | kubectl apply -f filebeat/ 25 | ``` 26 | 27 | 任选一个node ip,在本地添加hosts: 28 | 29 | ```a 30 | 192.168.30.130 kibana.lzxlinux.com 31 | ``` 32 | 33 | 打开`kibana.lzxlinux.com`,访问kibana查看日志。 34 | 35 | --- 36 | -------------------------------------------------------------------------------- /harbor/harbor/conf/clair.yaml: -------------------------------------------------------------------------------- 1 | clair: 2 | database: 3 | type: pgsql 4 | options: 5 | source: "{{ template "harbor.database.clair" . }}" 6 | # Number of elements kept in the cache 7 | # Values unlikely to change (e.g. namespaces) are cached in order to save prevent needless roundtrips to the database. 8 | cachesize: 16384 9 | api: 10 | # API server port 11 | port: 6060 12 | healthport: 6061 13 | # Deadline before an API request will respond with a 503 14 | timeout: 300s 15 | updater: 16 | interval: {{ .Values.clair.updatersInterval }}h 17 | -------------------------------------------------------------------------------- /gitlab/webservice/webservice-ing.yaml: -------------------------------------------------------------------------------- 1 | # networking.k8s.io/v1 replaces extensions/v1beta1, which was removed in Kubernetes 1.22 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: gitlab-webservice 6 | namespace: public-service 7 | labels: 8 | app: gitlab 9 | component: webservice 10 | spec: 11 | rules: 12 | - host: gitlab.lzxlinux.com 13 | http: 14 | paths: 15 | - path: / 16 | pathType: Prefix 17 | backend: 18 | service: 19 | name: gitlab-webservice 20 | port: 21 | number: 8181 22 | - path: /admin/sidekiq 23 | pathType: Prefix 24 | backend: 25 | service: 26 | name: gitlab-webservice 27 | port: 28 | number: 8080 29 | -------------------------------------------------------------------------------- /harbor/harbor/templates/ingress/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq (include "harbor.autoGenCertForIngress" .) 
"true" }} 2 | {{- $ca := genCA "harbor-ca" 365 }} 3 | {{- $cert := genSignedCert .Values.expose.ingress.hosts.core nil (list .Values.expose.ingress.hosts.core .Values.expose.ingress.hosts.notary) 365 $ca }} 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: "{{ template "harbor.ingress" . }}" 8 | labels: 9 | {{ include "harbor.labels" . | indent 4 }} 10 | type: kubernetes.io/tls 11 | data: 12 | tls.crt: {{ $cert.Cert | b64enc | quote }} 13 | tls.key: {{ $cert.Key | b64enc | quote }} 14 | ca.crt: {{ $ca.Cert | b64enc | quote }} 15 | {{- end }} -------------------------------------------------------------------------------- /redis/redis-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | namespace: public-service 6 | labels: 7 | app: redis 8 | spec: 9 | selector: 10 | app: redis 11 | appCluster: redis-cluster 12 | ports: 13 | - name: redis 14 | port: 6379 15 | clusterIP: None 16 | 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: redis-access 22 | namespace: public-service 23 | labels: 24 | app: redis 25 | spec: 26 | selector: 27 | app: redis 28 | appCluster: redis-cluster 29 | ports: 30 | - name: redis-access 31 | protocol: TCP 32 | port: 6379 33 | targetPort: 6379 34 | -------------------------------------------------------------------------------- /gitlab/shell/shell-sshd-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-shell-sshd 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab-shell 9 | data: 10 | sshd_config: | 11 | Port 2222 12 | PermitRootLogin no 13 | UsePrivilegeSeparation no 14 | PidFile /srv/sshd/sshd.pid 15 | AuthorizedKeysFile .ssh/authorized_keys 16 | AuthorizedKeysCommand /authorized_keys %u %k 17 | AuthorizedKeysCommandUser git 18 | PasswordAuthentication no 19 | 
AllowUsers git 20 | AcceptEnv GIT_PROTOCOL 21 | DisableForwarding yes 22 | MaxStartups 10:30:100 23 | LoginGraceTime 120 24 | -------------------------------------------------------------------------------- /harbor/harbor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: 2.1.0 3 | description: An open source trusted cloud native registry that stores, signs, and 4 | scans content 5 | engine: gotpl 6 | home: https://goharbor.io 7 | icon: https://raw.githubusercontent.com/goharbor/website/master/static/img/logos/harbor-icon-color.png 8 | keywords: 9 | - docker 10 | - registry 11 | - harbor 12 | maintainers: 13 | - email: yinw@vmware.com 14 | name: Wenkai Yin 15 | - email: hweiwei@vmware.com 16 | name: Weiwei He 17 | - email: dengq@vmware.com 18 | name: Qian Deng 19 | name: harbor 20 | sources: 21 | - https://github.com/goharbor/harbor 22 | - https://github.com/goharbor/harbor-helm 23 | version: 1.5.0 24 | -------------------------------------------------------------------------------- /gitlab/postgresql/postgresql-init-db-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-postgresql-init-db 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab 9 | data: 10 | init_revision.sh: | 11 | if [[ ! 
-f "$POSTGRESQL_VOLUME_DIR/.gitlab_1_scripts_initialized" ]] ; then 12 | rm -f "$POSTGRESQL_VOLUME_DIR/.user_scripts_initialized" 13 | touch "$POSTGRESQL_VOLUME_DIR/.gitlab_1_scripts_initialized" 14 | fi 15 | 16 | enable_extensions.sh: | 17 | PGPASSWORD=$(cat ${POSTGRES_POSTGRES_PASSWORD_FILE}) psql -d gitlabhq_production -U postgres -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm; CREATE EXTENSION IF NOT EXISTS btree_gist;' 18 | -------------------------------------------------------------------------------- /elfk/kibana/kibana-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kibana 5 | namespace: public-service 6 | labels: 7 | app: kibana 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: kibana 12 | template: 13 | metadata: 14 | labels: 15 | app: kibana 16 | spec: 17 | containers: 18 | - name: kibana 19 | image: docker.elastic.co/kibana/kibana-oss:7.6.2 20 | resources: 21 | limits: 22 | cpu: 1000m 23 | requests: 24 | cpu: 100m 25 | env: 26 | - name: ELASTICSEARCH_HOSTS 27 | value: "http://elasticsearch:9200" 28 | ports: 29 | - containerPort: 5601 30 | -------------------------------------------------------------------------------- /harbor/harbor/templates/core/core-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.core.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.core.crt\" is required!" 
.Values.internalTLS.core.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.core.key\" is required!" .Values.internalTLS.core.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} -------------------------------------------------------------------------------- /harbor/harbor/templates/portal/tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.portal.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.portal.crt\" is required!" .Values.internalTLS.portal.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.portal.key\" is required!" .Values.internalTLS.portal.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /harbor/harbor/templates/registry/registry-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.registry.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.registry.crt\" is required!" .Values.internalTLS.registry.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.registry.key\" is required!" 
.Values.internalTLS.registry.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} -------------------------------------------------------------------------------- /rabbitmq/rabbitmq-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: rmq-cluster 5 | namespace: public-service 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: Role 10 | metadata: 11 | name: rmq-cluster 12 | namespace: public-service 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | verbs: 19 | - get 20 | 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: RoleBinding 24 | metadata: 25 | name: rmq-cluster 26 | namespace: public-service 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: Role 30 | name: rmq-cluster 31 | subjects: 32 | - kind: ServiceAccount 33 | name: rmq-cluster 34 | namespace: public-service 35 | -------------------------------------------------------------------------------- /harbor/harbor/templates/clair/clair-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.clair.enabled .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.clair.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.clair.crt\" is required!" .Values.internalTLS.clair.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.clair.key\" is required!" 
.Values.internalTLS.clair.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} -------------------------------------------------------------------------------- /harbor/harbor/templates/trivy/trivy-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.trivy.enabled .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.trivy.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.trivy.crt\" is required!" .Values.internalTLS.trivy.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.trivy.key\" is required!" .Values.internalTLS.trivy.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /harbor/harbor/templates/jobservice/jobservice-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.jobservice.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.jobservice.crt\" is required!" .Values.internalTLS.jobservice.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.jobservice.key\" is required!" 
.Values.internalTLS.jobservice.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} -------------------------------------------------------------------------------- /prometheus/dingtalk/dingtalk-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: dingtalk 5 | namespace: public-service 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: dingtalk 11 | template: 12 | metadata: 13 | name: dingtalk 14 | labels: 15 | app: dingtalk 16 | spec: 17 | containers: 18 | - name: dingtalk 19 | image: timonwong/prometheus-webhook-dingtalk:latest 20 | imagePullPolicy: IfNotPresent 21 | ports: 22 | - containerPort: 8060 23 | volumeMounts: 24 | - name: config 25 | mountPath: /etc/prometheus-webhook-dingtalk 26 | volumes: 27 | - name: config 28 | configMap: 29 | name: dingtalk-config 30 | -------------------------------------------------------------------------------- /harbor/harbor/conf/notary-server.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": { 3 | "http_addr": ":4443" 4 | }, 5 | "trust_service": { 6 | "type": "remote", 7 | "hostname": "{{ template "harbor.notary-signer" . }}", 8 | "port": "7899", 9 | "tls_ca_file": "/etc/ssl/notary/ca.crt", 10 | "key_algorithm": "ecdsa" 11 | }, 12 | "logging": { 13 | "level": "{{ .Values.logLevel }}" 14 | }, 15 | "storage": { 16 | "backend": "postgres", 17 | "db_url": "{{ template "harbor.database.notaryServer" . 
}}" 18 | }, 19 | "auth": { 20 | "type": "token", 21 | "options": { 22 | "realm": "{{ .Values.externalURL }}/service/token", 23 | "service": "harbor-notary", 24 | "issuer": "harbor-token-issuer", 25 | "rootcertbundle": "/root.crt" 26 | } 27 | } 28 | } -------------------------------------------------------------------------------- /harbor/harbor/templates/jobservice/jobservice-cm-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: "{{ template "harbor.jobservice" . }}-env" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | data: 8 | CORE_URL: "{{ template "harbor.coreURL" . }}" 9 | TOKEN_SERVICE_URL: "{{ template "harbor.tokenServiceURL" . }}" 10 | REGISTRY_URL: "{{ template "harbor.registryURL" . }}" 11 | REGISTRY_CONTROLLER_URL: "{{ template "harbor.registryControllerURL" . }}" 12 | REGISTRY_CREDENTIAL_USERNAME: "{{ .Values.registry.credentials.username }}" 13 | {{- if has "jobservice" .Values.proxy.components }} 14 | HTTP_PROXY: "{{ .Values.proxy.httpProxy }}" 15 | HTTPS_PROXY: "{{ .Values.proxy.httpsProxy }}" 16 | NO_PROXY: "{{ template "harbor.noProxy" . }}" 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /harbor/harbor/templates/notary/notary-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.notary.enabled }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "harbor.notary-server" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | component: notary 9 | type: Opaque 10 | data: 11 | {{- if not .Values.notary.secretName }} 12 | {{- $ca := genCA "harbor-notary-ca" 365 }} 13 | {{- $cert := genSignedCert (include "harbor.notary-signer" .) 
nil nil 365 $ca }} 14 | ca.crt: {{ $ca.Cert | b64enc | quote }} 15 | tls.crt: {{ $cert.Cert | b64enc | quote }} 16 | tls.key: {{ $cert.Key | b64enc | quote }} 17 | {{- end }} 18 | server.json: {{ tpl (.Files.Get "conf/notary-server.json") . | b64enc }} 19 | signer.json: {{ tpl (.Files.Get "conf/notary-signer.json") . | b64enc }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /prometheus/node-exporter/node-exporter-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: node-exporter 5 | namespace: public-service 6 | labels: 7 | app: node-exporter 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: node-exporter 12 | template: 13 | metadata: 14 | name: node-exporter 15 | labels: 16 | app: node-exporter 17 | spec: 18 | containers: 19 | - name: node-exporter 20 | image: prom/node-exporter:latest 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - containerPort: 9100 24 | hostPort: 9100 25 | hostNetwork: true 26 | hostPID: true 27 | tolerations: 28 | - key: node-role.kubernetes.io/master 29 | operator: Exists 30 | effect: NoSchedule 31 | -------------------------------------------------------------------------------- /harbor/harbor/templates/chartmuseum/chartmuseum-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.chartmuseum.enabled .Values.internalTLS.enabled }} 2 | {{- if eq .Values.internalTLS.certSource "manual" }} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: "{{ template "harbor.internalTLS.chartmuseum.secretName" . }}" 7 | labels: 8 | {{ include "harbor.labels" . | indent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | ca.crt: {{ (required "The \"internalTLS.trustCa\" is required!" .Values.internalTLS.trustCa) | b64enc | quote }} 12 | tls.crt: {{ (required "The \"internalTLS.chartmuseum.crt\" is required!" 
.Values.internalTLS.chartmuseum.crt) | b64enc | quote }} 13 | tls.key: {{ (required "The \"internalTLS.chartmuseum.key\" is required!" .Values.internalTLS.chartmuseum.key) | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /harbor/harbor/templates/notary/notary-svc.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.notary.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "harbor.notary-server" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | spec: 9 | {{- if (eq .Values.expose.ingress.controller "gce") }} 10 | type: NodePort 11 | {{- end }} 12 | ports: 13 | - port: 4443 14 | selector: 15 | {{ include "harbor.matchLabels" . | indent 4 }} 16 | component: notary-server 17 | 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: {{ template "harbor.notary-signer" . }} 23 | labels: 24 | {{ include "harbor.labels" . | indent 4 }} 25 | spec: 26 | ports: 27 | - port: 7899 28 | selector: 29 | {{ include "harbor.matchLabels" . 
| indent 4 }} 30 | component: notary-signer 31 | {{ end }} -------------------------------------------------------------------------------- /elfk/logstash/logstash-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: logstash-config 5 | namespace: public-service 6 | data: 7 | logstash.yml: | 8 | http.host: "0.0.0.0" 9 | path.config: /usr/share/logstash/pipeline 10 | logstash.conf: | 11 | input { 12 | beats { 13 | port => 5044 14 | } 15 | } 16 | 17 | filter { 18 | #multiline { 19 | #pattern => "^\d{4}-\d{1,2}-\d{1,2}\s\d{1,2}:\d{1,2}:\d{1,2}" 20 | #negate => true 21 | #what => "previous" 22 | #} 23 | grok { 24 | match => [ "message", "%{TIMESTAMP_ISO8601:logtime} %{LOGLEVEL:level}" ] 25 | } 26 | } 27 | 28 | output { 29 | elasticsearch { 30 | hosts => ["elasticsearch:9200"] 31 | index => "your-index-%{+YYYY.MM.dd}" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /harbor/harbor/templates/core/core-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ template "harbor.core" . }} 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | secretKey: {{ .Values.secretKey | b64enc | quote }} 10 | secret: {{ .Values.core.secret | default (randAlphaNum 16) | b64enc | quote }} 11 | {{- if not .Values.core.secretName }} 12 | tls.crt: {{ .Files.Get "cert/tls.crt" | b64enc }} 13 | tls.key: {{ .Files.Get "cert/tls.key" | b64enc }} 14 | {{- end }} 15 | HARBOR_ADMIN_PASSWORD: {{ .Values.harborAdminPassword | b64enc | quote }} 16 | POSTGRESQL_PASSWORD: {{ template "harbor.database.encryptedPassword" . 
}} 17 | REGISTRY_CREDENTIAL_PASSWORD: {{ .Values.registry.credentials.password | b64enc | quote }} 18 | CSRF_KEY: {{ .Values.core.xsrfKey | default (randAlphaNum 32) | b64enc | quote }} 19 | -------------------------------------------------------------------------------- /gitlab-runner/gitlab-runner-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: gitlab-runner 5 | namespace: public-service 6 | labels: 7 | app: gitlab-runner 8 | 9 | --- 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: Role 12 | metadata: 13 | name: gitlab-runner 14 | labels: 15 | app: gitlab-runner 16 | rules: 17 | - apiGroups: [""] 18 | resources: [pods, pods/exec, secrets] 19 | verbs: [get, list, watch, create, patch, delete] 20 | 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: RoleBinding 24 | metadata: 25 | name: gitlab-runner 26 | namespace: public-service 27 | labels: 28 | app: gitlab-runner 29 | roleRef: 30 | apiGroup: rbac.authorization.k8s.io 31 | kind: Role 32 | name: gitlab-runner 33 | subjects: 34 | - kind: ServiceAccount 35 | name: gitlab-runner 36 | namespace: public-service 37 | -------------------------------------------------------------------------------- /rabbitmq/README.md: -------------------------------------------------------------------------------- 1 | ### rabbitmq 2 | 3 | - 部署: 4 | 5 | 阿里云创建NAS共享存储的StorageClass: 6 | 7 | ```bash 8 | kubectl apply -f rabbitmq-sc.yaml 9 | ``` 10 | 11 | ```bash 12 | kubectl apply -f ./ 13 | ``` 14 | 15 | 部署完毕后, 16 | 17 | ```bash 18 | kubectl get all -n public-service 19 | 20 | NAME READY STATUS RESTARTS AGE 21 | pod/rmq-cluster-0 1/1 Running 0 4h 22 | pod/rmq-cluster-1 1/1 Running 0 4h 23 | pod/rmq-cluster-2 1/1 Running 0 4h 24 | 25 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 26 | service/rmq-cluster ClusterIP 172.21.11.245 15672/TCP,5672/TCP 4h 27 | 28 | NAME READY AGE 29 | statefulset.apps/rmq-cluster 
3/3 4h 30 | ``` 31 | 32 | 添加hosts:`rabbitmq.lzxlinux.com`,使用初始账号密码`guest/guest`登录即可。 33 | 34 | --- 35 | -------------------------------------------------------------------------------- /prometheus/kube-state-metrics/kube-state-metrics-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: public-service 6 | labels: 7 | app: kube-state-metrics 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: kube-state-metrics 13 | template: 14 | metadata: 15 | labels: 16 | app: kube-state-metrics 17 | spec: 18 | serviceAccountName: kube-state-metrics 19 | containers: 20 | - name: kube-state-metrics 21 | image: quay.mirrors.ustc.edu.cn/coreos/kube-state-metrics:v1.9.7 22 | imagePullPolicy: IfNotPresent 23 | ports: 24 | - containerPort: 8080 25 | nodeSelector: 26 | node-role.kubernetes.io/master: "" 27 | tolerations: 28 | - key: node-role.kubernetes.io/master 29 | operator: Exists 30 | effect: NoSchedule 31 | -------------------------------------------------------------------------------- /harbor/harbor/.github/workflows/unittest.yaml: -------------------------------------------------------------------------------- 1 | name: Unit test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | 7 | jobs: 8 | unit-test: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@v2 13 | 14 | - name: Set up Helm 3.2.3 15 | uses: azure/setup-helm@v1 16 | with: 17 | version: '3.2.3' 18 | 19 | - name: Set up Go 1.13 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: 1.13 23 | 24 | - name: Cache go mod 25 | uses: actions/cache@v2 26 | with: 27 | path: ~/go/pkg/mod 28 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 29 | restore-keys: | 30 | ${{ runner.os }}-go- 31 | 32 | - name: Run unit tests 33 | working-directory: ./test 34 | run: 35 | go test -v github.com/goharbor/harbor-helm/unittest 36 | 
-------------------------------------------------------------------------------- /prometheus/blackbox-exporter/blackbox-exporter-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: blackbox-exporter 5 | namespace: public-service 6 | labels: 7 | app: blackbox-exporter 8 | data: 9 | blackbox.yml: |- 10 | modules: 11 | http_2xx: 12 | prober: http 13 | timeout: 10s 14 | http: 15 | valid_http_versions: ["HTTP/1.1", "HTTP/2"] 16 | valid_status_codes: [] 17 | method: GET 18 | preferred_ip_protocol: "ip4" 19 | http_post_2xx: 20 | prober: http 21 | timeout: 10s 22 | http: 23 | valid_http_versions: ["HTTP/1.1", "HTTP/2"] 24 | method: POST 25 | preferred_ip_protocol: "ip4" 26 | tcp_connect: 27 | prober: tcp 28 | timeout: 10s 29 | icmp: 30 | prober: icmp 31 | timeout: 10s 32 | icmp: 33 | preferred_ip_protocol: "ip4" 34 | -------------------------------------------------------------------------------- /harbor/harbor/templates/nginx/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq (include "harbor.autoGenCertForNginx" .) "true" }} 2 | {{- $ca := genCA "harbor-ca" 365 }} 3 | {{- $cn := (required "The \"expose.tls.auto.commonName\" is required!" .Values.expose.tls.auto.commonName) }} 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: {{ template "harbor.nginx" . }} 8 | labels: 9 | {{ include "harbor.labels" . 
| indent 4 }} 10 | type: Opaque 11 | data: 12 | {{- if regexMatch `^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$` $cn }} 13 | {{- $cert := genSignedCert $cn (list $cn) nil 365 $ca }} 14 | tls.crt: {{ $cert.Cert | b64enc | quote }} 15 | tls.key: {{ $cert.Key | b64enc | quote }} 16 | ca.crt: {{ $ca.Cert | b64enc | quote }} 17 | {{- else }} 18 | {{- $cert := genSignedCert $cn nil (list $cn) 365 $ca }} 19 | tls.crt: {{ $cert.Cert | b64enc | quote }} 20 | tls.key: {{ $cert.Key | b64enc | quote }} 21 | ca.crt: {{ $ca.Cert | b64enc | quote }} 22 | {{- end }} 23 | {{- end }} -------------------------------------------------------------------------------- /prometheus/prometheus/prometheus-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: public-service 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | name: prometheus 12 | rules: 13 | - apiGroups: [""] 14 | resources: 15 | - nodes 16 | - nodes/proxy 17 | - services 18 | - endpoints 19 | - pods 20 | verbs: ["get", "list", "watch"] 21 | - apiGroups: ["networking.k8s.io"] 22 | resources: 23 | - ingresses 24 | verbs: ["get", "list", "watch"] 25 | - apiGroups: [""] 26 | resources: 27 | - configmaps 28 | verbs: ["get"] 29 | - nonResourceURLs: ["/metrics"] 30 | verbs: ["get"] 31 | 32 | --- 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | kind: ClusterRoleBinding 35 | metadata: 36 | name: prometheus 37 | roleRef: 38 | apiGroup: rbac.authorization.k8s.io 39 | kind: ClusterRole 40 | name: prometheus 41 | subjects: 42 | - kind: ServiceAccount 43 | name: prometheus 44 | namespace: public-service 45 | -------------------------------------------------------------------------------- /weave-scope/README.md: -------------------------------------------------------------------------------- 1 | ### weave-scope 2 | 3 | - 
部署: 4 | 5 | ```bash 6 | kubectl apply -f weave-ns.yaml 7 | 8 | kubectl apply -f manifests/ 9 | ``` 10 | 11 | ```bash 12 | kubectl get svc -n weave 13 | 14 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 15 | weave-scope-app ClusterIP 10.106.124.95 80/TCP 25s 16 | 17 | kubectl get pod -n weave 18 | 19 | NAME READY STATUS RESTARTS AGE 20 | weave-scope-agent-27zpb 1/1 Running 0 32s 21 | weave-scope-agent-c5hcq 1/1 Running 0 32s 22 | weave-scope-agent-j4tf7 1/1 Running 0 32s 23 | weave-scope-agent-s8p6s 1/1 Running 0 32s 24 | weave-scope-app-bc7444d59-6xwkk 1/1 Running 0 33s 25 | weave-scope-cluster-agent-5c5dcc8cb-4d7mh 1/1 Running 0 33s 26 | ``` 27 | 28 | - 访问ui: 29 | 30 | 添加hosts:`scope.lzxlinux.com`,访问`scope.lzxlinux.com`。 31 | 32 | --- 33 | -------------------------------------------------------------------------------- /elfk/logstash/logstash-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: logstash 5 | namespace: public-service 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: logstash 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | app: logstash 15 | spec: 16 | containers: 17 | - name: logstash 18 | image: docker.elastic.co/logstash/logstash-oss:7.6.2 19 | ports: 20 | - containerPort: 5044 21 | volumeMounts: 22 | - name: config 23 | mountPath: /usr/share/logstash/config 24 | - name: pipeline 25 | mountPath: /usr/share/logstash/pipeline 26 | volumes: 27 | - name: config 28 | configMap: 29 | name: logstash-config 30 | items: 31 | - key: logstash.yml 32 | path: logstash.yml 33 | - name: pipeline 34 | configMap: 35 | name: logstash-config 36 | items: 37 | - key: logstash.conf 38 | path: logstash.conf 39 | -------------------------------------------------------------------------------- /weave-scope/manifests/weave-scope-app-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: weave-scope-app 5 | namespace: weave 6 | labels: 7 | name: weave-scope-app 8 | app: weave-scope 9 | weave-cloud-component: scope 10 | weave-scope-component: app 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | name: weave-scope-app 16 | app: weave-scope 17 | weave-cloud-component: scope 18 | weave-scope-component: app 19 | template: 20 | metadata: 21 | labels: 22 | name: weave-scope-app 23 | app: weave-scope 24 | weave-cloud-component: scope 25 | weave-scope-component: app 26 | spec: 27 | containers: 28 | - name: app 29 | image: docker.io/weaveworks/scope:1.13.1 30 | imagePullPolicy: IfNotPresent 31 | ports: 32 | - containerPort: 4040 33 | protocol: TCP 34 | args: 35 | - '--mode=app' 36 | command: 37 | - /home/weave/scope 38 | env: [] 39 | -------------------------------------------------------------------------------- /consul/server/consul-server-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: consul-server 5 | namespace: public-service 6 | labels: 7 | app: consul 8 | component: server 9 | spec: 10 | selector: 11 | app: consul 12 | component: server 13 | ports: 14 | - name: http 15 | port: 8500 16 | targetPort: 8500 17 | - name: dns-tcp 18 | protocol: TCP 19 | port: 8600 20 | targetPort: dns-tcp 21 | - name: dns-udp 22 | protocol: "UDP" 23 | port: 8600 24 | targetPort: dns-udp 25 | - name: serflan-tcp 26 | protocol: TCP 27 | port: 8301 28 | targetPort: 8301 29 | - name: serflan-udp 30 | protocol: UDP 31 | port: 8301 32 | targetPort: 8301 33 | - name: serfwan-tcp 34 | protocol: TCP 35 | port: 8302 36 | targetPort: 8302 37 | - name: serfwan-udp 38 | protocol: UDP 39 | port: 8302 40 | targetPort: 8302 41 | - name: server 42 | port: 8300 43 | targetPort: 8300 44 | publishNotReadyAddresses: true 45 | clusterIP: None 46 | 
-------------------------------------------------------------------------------- /harbor/harbor/templates/chartmuseum/chartmuseum-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.chartmuseum.enabled }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: "{{ template "harbor.chartmuseum" . }}" 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | type: Opaque 9 | data: 10 | CACHE_REDIS_PASSWORD: {{ include "harbor.redis.password" . | b64enc | quote }} 11 | {{- $storage := .Values.persistence.imageChartStorage }} 12 | {{- $storageType := $storage.type }} 13 | {{- if eq $storageType "azure" }} 14 | AZURE_STORAGE_ACCESS_KEY: {{ $storage.azure.accountkey | b64enc | quote }} 15 | {{- else if eq $storageType "gcs" }} 16 | # TODO support the keyfile of gcs 17 | {{- else if eq $storageType "s3" }} 18 | {{- if $storage.s3.secretkey }} 19 | AWS_SECRET_ACCESS_KEY: {{ $storage.s3.secretkey | b64enc | quote }} 20 | {{- end }} 21 | {{- else if eq $storageType "swift" }} 22 | OS_PASSWORD: {{ $storage.swift.password | b64enc | quote }} 23 | {{- else if eq $storageType "oss" }} 24 | ALIBABA_CLOUD_ACCESS_KEY_SECRET: {{ $storage.oss.accesskeysecret | b64enc | quote }} 25 | {{- end }} 26 | {{- end }} -------------------------------------------------------------------------------- /rabbitmq/rabbitmq-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: rmq-cluster-config 5 | namespace: public-service 6 | labels: 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | data: 9 | enabled_plugins: | 10 | [rabbitmq_management,rabbitmq_peer_discovery_k8s]. 
11 | rabbitmq.conf: | 12 | loopback_users.guest = false 13 | 14 | ## Clustering 15 | cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s 16 | cluster_formation.k8s.host = kubernetes.default.svc.cluster.local 17 | cluster_formation.k8s.address_type = hostname 18 | ################################################# 19 | # public-service is rabbitmq-cluster's namespace# 20 | ################################################# 21 | cluster_formation.k8s.hostname_suffix = .rmq-cluster.public-service.svc.cluster.local 22 | cluster_formation.node_cleanup.interval = 10 23 | cluster_formation.node_cleanup.only_log_warning = true 24 | cluster_partition_handling = autoheal 25 | ## queue master locator 26 | queue_master_locator=min-masters 27 | -------------------------------------------------------------------------------- /harbor/harbor/templates/registry/registry-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.persistence.enabled }} 2 | {{- $registry := .Values.persistence.persistentVolumeClaim.registry -}} 3 | {{- if and (not $registry.existingClaim) (eq .Values.persistence.imageChartStorage.type "filesystem") }} 4 | kind: PersistentVolumeClaim 5 | apiVersion: v1 6 | metadata: 7 | name: {{ template "harbor.registry" . }} 8 | {{- if eq .Values.persistence.resourcePolicy "keep" }} 9 | annotations: 10 | helm.sh/resource-policy: keep 11 | {{- end }} 12 | labels: 13 | {{ include "harbor.labels" . | indent 4 }} 14 | component: registry 15 | spec: 16 | accessModes: 17 | - {{ $registry.accessMode }} 18 | resources: 19 | requests: 20 | storage: {{ $registry.size }} 21 | {{- if $registry.storageClass }} 22 | {{- if eq "-" $registry.storageClass }} 23 | storageClassName: "" 24 | {{- else }} 25 | storageClassName: {{ $registry.storageClass }} 26 | {{- end }} 27 | {{- end }} 28 | selector: 29 | matchLabels: 30 | {{ include "harbor.matchLabels" . 
| indent 6 }} 31 | component: registry 32 | {{- end }} 33 | {{- end }} 34 | -------------------------------------------------------------------------------- /harbor/harbor/templates/jobservice/jobservice-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- $jobservice := .Values.persistence.persistentVolumeClaim.jobservice -}} 2 | {{- if and .Values.persistence.enabled (not $jobservice.existingClaim) }} 3 | {{- if eq .Values.jobservice.jobLogger "file" }} 4 | kind: PersistentVolumeClaim 5 | apiVersion: v1 6 | metadata: 7 | name: {{ template "harbor.jobservice" . }} 8 | {{- if eq .Values.persistence.resourcePolicy "keep" }} 9 | annotations: 10 | helm.sh/resource-policy: keep 11 | {{- end }} 12 | labels: 13 | {{ include "harbor.labels" . | indent 4 }} 14 | component: jobservice 15 | spec: 16 | accessModes: 17 | - {{ $jobservice.accessMode }} 18 | resources: 19 | requests: 20 | storage: {{ $jobservice.size }} 21 | {{- if $jobservice.storageClass }} 22 | {{- if eq "-" $jobservice.storageClass }} 23 | storageClassName: "" 24 | {{- else }} 25 | storageClassName: {{ $jobservice.storageClass }} 26 | {{- end }} 27 | {{- end }} 28 | selector: 29 | matchLabels: 30 | {{ include "harbor.matchLabels" . 
| indent 6 }} 31 | component: jobservice 32 | {{- end }} 33 | {{- end }} 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 [Tobewont] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /gitlab/minio/minio-create-buckets-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: gitlab-minio-create-buckets-1 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: minio 9 | spec: 10 | activeDeadlineSeconds: 600 11 | template: 12 | metadata: 13 | labels: 14 | app: gitlab 15 | component: minio 16 | spec: 17 | restartPolicy: OnFailure 18 | volumes: 19 | - name: minio-configuration 20 | projected: 21 | sources: 22 | - configMap: 23 | name: gitlab-minio-config-cm 24 | - secret: 25 | name: "gitlab-minio-secret" 26 | containers: 27 | - name: minio-mc 28 | image: minio/mc:RELEASE.2018-07-13T00-53-22Z 29 | imagePullPolicy: IfNotPresent 30 | command: ["/bin/sh", "/config/initialize"] 31 | env: 32 | - name: MINIO_ENDPOINT 33 | value: gitlab-minio-svc 34 | - name: MINIO_PORT 35 | value: "9000" 36 | resources: 37 | requests: 38 | cpu: 50m 39 | volumeMounts: 40 | - name: minio-configuration 41 | mountPath: /config 42 | -------------------------------------------------------------------------------- /harbor/harbor/templates/chartmuseum/chartmuseum-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.chartmuseum.enabled }} 2 | {{- $persistence := .Values.persistence -}} 3 | {{- if $persistence.enabled }} 4 | {{- $chartmuseum := $persistence.persistentVolumeClaim.chartmuseum -}} 5 | {{- if and (not $chartmuseum.existingClaim) (eq $persistence.imageChartStorage.type "filesystem") }} 6 | kind: PersistentVolumeClaim 7 | apiVersion: v1 8 | metadata: 9 | name: {{ template "harbor.chartmuseum" . }} 10 | {{- if eq $persistence.resourcePolicy "keep" }} 11 | annotations: 12 | helm.sh/resource-policy: keep 13 | {{- end }} 14 | labels: 15 | {{ include "harbor.labels" . 
| indent 4 }}
    component: chartmuseum
spec:
  accessModes:
    - {{ $chartmuseum.accessMode }}
  resources:
    requests:
      storage: {{ $chartmuseum.size }}
  {{- if $chartmuseum.storageClass }}
  {{- if eq "-" $chartmuseum.storageClass }}
  storageClassName: ""
  {{- else }}
  storageClassName: {{ $chartmuseum.storageClass }}
  {{- end }}
  {{- end }}
  selector:
    matchLabels:
{{ include "harbor.matchLabels" . | indent 6 }}
      component: chartmuseum
{{- end }}
{{- end }}
{{- end }}
--------------------------------------------------------------------------------
/elfk+xpack/elasticsearch-head/head.yaml:
--------------------------------------------------------------------------------
# Ingress routing head.lzxlinux.cn to the elasticsearch-head UI service.
# FIX: extensions/v1beta1 Ingress was removed in Kubernetes v1.22;
# migrated to networking.k8s.io/v1 (requires pathType and a service backend).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: head
  namespace: log
spec:
  rules:
    - host: head.lzxlinux.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: head
                port:
                  number: 9100

---
apiVersion: v1
kind: Service
metadata:
  name: head
  namespace: log
  labels:
    app: head
spec:
  selector:
    app: head
  ports:
    - port: 9100
      protocol: TCP
      targetPort: 9100

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: head
  namespace: log
  labels:
    app: head
spec:
  replicas: 1
  selector:
    matchLabels:
      app: head
  template:
    metadata:
      labels:
        app: head
    spec:
      containers:
        - name: head
          image: mobz/elasticsearch-head:5
          resources:
            limits:
              cpu: 200m
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          ports:
            - containerPort: 9100
--------------------------------------------------------------------------------
/gitlab/shell/shell-cm.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind:
ConfigMap 3 | metadata: 4 | name: gitlab-shell 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab-shell 9 | data: 10 | configure: | 11 | set -e 12 | config_dir="/init-config" 13 | secret_dir="/init-secrets" 14 | 15 | for secret in shell ; do 16 | mkdir -p "${secret_dir}/${secret}" 17 | cp -v -r -L "${config_dir}/${secret}/." "${secret_dir}/${secret}/" 18 | done 19 | for secret in redis minio objectstorage postgres ldap omniauth smtp kas ; do 20 | if [ -e "${config_dir}/${secret}" ]; then 21 | mkdir -p "${secret_dir}/${secret}" 22 | cp -v -r -L "${config_dir}/${secret}/." "${secret_dir}/${secret}/" 23 | fi 24 | done 25 | mkdir -p /${secret_dir}/ssh 26 | cp -v -r -L /${config_dir}/ssh_host_* /${secret_dir}/ssh/ 27 | chmod 0400 /${secret_dir}/ssh/ssh_host_* 28 | 29 | config.yml.erb: | 30 | user: git 31 | gitlab_url: "http://gitlab-webservice:8181/" 32 | secret_file: /etc/gitlab-secrets/shell/.gitlab_shell_secret 33 | http_settings: 34 | self_signed_cert: false 35 | auth_file: "/home/git/.ssh/authorized_keys" 36 | log_level: INFO 37 | audit_usernames: false 38 | -------------------------------------------------------------------------------- /harbor/harbor/.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | pull_request: 5 | push: 6 | 7 | jobs: 8 | lint: 9 | runs-on: ubuntu-latest 10 | strategy: 11 | matrix: 12 | helm_version: [3.2.3, 2.16.8] 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v2 16 | with: 17 | path: harbor 18 | 19 | - name: Set up Helm 20 | uses: azure/setup-helm@v1 21 | with: 22 | version: '${{ matrix.helm_version }}' 23 | 24 | - name: Helm version 25 | run: 26 | helm version -c 27 | 28 | - name: Run lint 29 | working-directory: ./harbor 30 | run: 31 | helm lint . 32 | 33 | - name: Update dependency 34 | working-directory: ./harbor 35 | run: 36 | helm dependency update . 
37 | 38 | - name: Run template for ingress expose 39 | working-directory: ./harbor 40 | run: 41 | helm template --set "expose.type=ingress" --output-dir $(mktemp -d -t output-XXXXXXXXXX) . 42 | 43 | - name: Run template for nodePort expose 44 | working-directory: ./harbor 45 | run: 46 | helm template --set "expose.type=nodePort,expose.tls.auto.commonName=127.0.0.1" --output-dir $(mktemp -d -t output-XXXXXXXXXX) . -------------------------------------------------------------------------------- /kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | ENV KAFKA_USER=kafka \ 3 | KAFKA_DATA_DIR=/var/lib/kafka/data \ 4 | JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 \ 5 | KAFKA_HOME=/opt/kafka \ 6 | PATH=$PATH:/opt/kafka/bin 7 | 8 | ARG KAFKA_VERSION=2.2.2 9 | ARG KAFKA_DIST=kafka_2.12-2.2.2 10 | 11 | RUN set -x \ 12 | && apt-get update \ 13 | && apt-get install -y wget openjdk-8-jre-headless gpg-agent \ 14 | && wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/$KAFKA_VERSION/$KAFKA_DIST.tgz \ 15 | && wget http://www.apache.org/dist/kafka/$KAFKA_VERSION/$KAFKA_DIST.tgz.asc \ 16 | && wget http://kafka.apache.org/KEYS \ 17 | && export GNUPGHOME="$(mktemp -d)" \ 18 | && gpg --import KEYS \ 19 | && gpg --batch --verify "$KAFKA_DIST.tgz.asc" "$KAFKA_DIST.tgz" \ 20 | && tar -xzf "$KAFKA_DIST.tgz" -C /opt \ 21 | && rm -r "$GNUPGHOME" "$KAFKA_DIST.tgz" "$KAFKA_DIST.tgz.asc" 22 | 23 | COPY log4j.properties /opt/$KAFKA_DIST/config/ 24 | 25 | RUN set -x \ 26 | && ln -s /opt/$KAFKA_DIST $KAFKA_HOME \ 27 | && useradd $KAFKA_USER \ 28 | && [ `id -u $KAFKA_USER` -eq 1000 ] \ 29 | && [ `id -g $KAFKA_USER` -eq 1000 ] \ 30 | && mkdir -p $KAFKA_DATA_DIR \ 31 | && chown -R "$KAFKA_USER:$KAFKA_USER" /opt/$KAFKA_DIST \ 32 | && chown -R "$KAFKA_USER:$KAFKA_USER" $KAFKA_DATA_DIR 33 | -------------------------------------------------------------------------------- /gitlab/shared-secrets/shared-secrets-job.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: gitlab-shared-secrets-1-7d3 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: shared-secrets 9 | annotations: 10 | "helm.sh/hook": pre-install,pre-upgrade 11 | "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation 12 | spec: 13 | template: 14 | metadata: 15 | labels: 16 | app: gitlab 17 | component: shared-secrets 18 | spec: 19 | containers: 20 | - name: shared-secrets 21 | image: registry.gitlab.com/gitlab-org/build/cng/kubectl:1.13.12 22 | imagePullPolicy: IfNotPresent 23 | command: ['/bin/bash', '/scripts/generate-secrets'] 24 | resources: 25 | requests: 26 | cpu: 50m 27 | volumeMounts: 28 | - name: scripts 29 | mountPath: /scripts 30 | - name: ssh 31 | mountPath: /etc/ssh 32 | securityContext: 33 | runAsUser: 65534 34 | fsGroup: 65534 35 | serviceAccountName: gitlab-shared-secrets 36 | restartPolicy: Never 37 | volumes: 38 | - name: scripts 39 | configMap: 40 | name: gitlab-shared-secrets 41 | - name: ssh 42 | emptyDir: {} 43 | -------------------------------------------------------------------------------- /gitlab-runner/README.md: -------------------------------------------------------------------------------- 1 | ### gitlab-runner 2 | 3 | - 查看注册url和token: 4 | 5 | ![Image text](https://github.com/Tobewont/kubernetes/blob/master/gitlab-runner/img/runner-1.png) 6 | 7 | - base64: 8 | 9 | ```bash 10 | echo -n 'HtyY1AEoI5KsRjJzOZAA14ekUJ8aJiA1Cc049cAZC0MDFY5EyN0kOcvbTwnCG7sl' | base64 #base64加密 11 | 12 | SHR5WTFBRW9JNUtzUmpKek9aQUExNGVrVUo4YUppQTFDYzA0OWNBWkMwTURGWTVFeU4wa09jdmJUd25DRzdzbA== 13 | ``` 14 | 15 | ```bash 16 | echo 'SHR5WTFBRW9JNUtzUmpKek9aQUExNGVrVUo4YUppQTFDYzA0OWNBWkMwTURGWTVFeU4wa09jdmJUd25DRzdzbA==' | base64 -d #base64解密 17 | 18 | HtyY1AEoI5KsRjJzOZAA14ekUJ8aJiA1Cc049cAZC0MDFY5EyN0kOcvbTwnCG7sl 19 | ``` 20 | 21 | 加解密工具:http://tool.chinaz.com/tools/base64.aspx 22 | 23 | - 
部署: 24 | 25 | ```bash 26 | kubectl apply -f public-service-ns.yaml 27 | 28 | kubectl apply -f gitlab-runner-rbac.yaml 29 | 30 | kubectl apply -f gitlab-runner-secret.yaml 31 | 32 | kubectl apply -f gitlab-runner-cm.yaml 33 | 34 | kubectl apply -f gitlab-runner-deploy.yaml 35 | ``` 36 | 37 | ```bash 38 | kubectl get pod -n public-service | grep gitlab-runner 39 | 40 | gitlab-runner-5dddb4498f-dvr4r 1/1 Running 0 22s 41 | ``` 42 | 43 | ![Image text](https://github.com/Tobewont/kubernetes/blob/master/gitlab-runner/img/runner-2.png) 44 | 45 | k8s部署gitlab-runner完成。 46 | 47 | --- 48 | -------------------------------------------------------------------------------- /prometheus/blackbox-exporter/blackbox-exporter-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: blackbox-exporter 5 | namespace: public-service 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: blackbox-exporter 11 | template: 12 | metadata: 13 | labels: 14 | app: blackbox-exporter 15 | spec: 16 | containers: 17 | - name: blackbox-exporter 18 | image: prom/blackbox-exporter:latest 19 | imagePullPolicy: IfNotPresent 20 | ports: 21 | - containerPort: 9115 22 | readinessProbe: 23 | tcpSocket: 24 | port: 9115 25 | initialDelaySeconds: 10 26 | timeoutSeconds: 5 27 | resources: 28 | requests: 29 | memory: 50Mi 30 | cpu: 100m 31 | limits: 32 | memory: 60Mi 33 | cpu: 200m 34 | volumeMounts: 35 | - name: config 36 | mountPath: /etc/blackbox_exporter 37 | args: 38 | - '--config.file=/etc/blackbox_exporter/blackbox.yml' 39 | - '--web.listen-address=:9115' 40 | volumes: 41 | - name: config 42 | configMap: 43 | name: blackbox-exporter 44 | nodeSelector: 45 | node-role.kubernetes.io/master: "" 46 | tolerations: 47 | - key: node-role.kubernetes.io/master 48 | operator: Exists 49 | effect: NoSchedule 50 | -------------------------------------------------------------------------------- 
/elfk+xpack/filebeat/filebeat-config.yaml:
--------------------------------------------------------------------------------
# Filebeat config: tails nginx and tomcat logs, tags each event with pod
# metadata, joins multi-line entries, and ships to Logstash on port 5040.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: default
  labels:
    app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        path: ${path.config}/inputs.d/*.yml
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        reload.enabled: false

    filebeat.inputs:
    - type: log
      enabled: true
      tail_files: true
      backoff: "1s"
      paths:
        - /nginxlog/*.log
      fields:
        pod_name: '${pod_name}'
        POD_IP: '${POD_IP}'
        type: nginx_access
      fields_under_root: true
      multiline.pattern: '\d+\.\d+\.\d+\.\d+'
      multiline.negate: true
      multiline.match: after

    - type: log
      enabled: true
      tail_files: true
      backoff: "1s"
      paths:
        - /tomcatlog/*.log
      fields:
        pod_name: '${pod_name}'
        POD_IP: '${POD_IP}'
        type: tomcat_access
      fields_under_root: true
      multiline.pattern: '\d+\-\w+\-\d+ \d+:\d+:\d+\.\d+'
      multiline.negate: true
      multiline.match: after

    output.logstash:
      hosts: ["logstash.log:5040"]
      enabled: true
      worker: 1
      compression_level: 3
--------------------------------------------------------------------------------
/elfk+xpack/kibana/kibana.yaml:
--------------------------------------------------------------------------------
# Ingress routing kibana.lzxlinux.cn to the Kibana service.
# FIX: extensions/v1beta1 Ingress was removed in Kubernetes v1.22;
# migrated to networking.k8s.io/v1 (requires pathType and a service backend).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: log
spec:
  rules:
    - host: kibana.lzxlinux.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana
                port:
                  number: 5601

---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: log
  labels:
    app: kibana
spec:
  selector:
    app: kibana
  ports:
    - port: 5601
      protocol: TCP
targetPort: 5601 31 | 32 | --- 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | metadata: 36 | name: kibana 37 | namespace: log 38 | labels: 39 | app: kibana 40 | spec: 41 | replicas: 1 42 | selector: 43 | matchLabels: 44 | app: kibana 45 | template: 46 | metadata: 47 | labels: 48 | app: kibana 49 | spec: 50 | containers: 51 | - name: kibana 52 | image: kibana:7.10.1 53 | ports: 54 | - containerPort: 5601 55 | resources: 56 | limits: 57 | cpu: 500m 58 | memory: 500Mi 59 | requests: 60 | cpu: 500m 61 | memory: 500Mi 62 | volumeMounts: 63 | - name: config 64 | mountPath: /usr/share/kibana/config 65 | volumes: 66 | - name: config 67 | configMap: 68 | name: kibana-config 69 | -------------------------------------------------------------------------------- /prometheus/alertmanager/alertmanager-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: alertmanager-config 5 | namespace: public-service 6 | data: 7 | config.yml: |- 8 | global: 9 | resolve_timeout: 5m 10 | smtp_smarthost: 'smtp.163.com:465' #邮箱smtp服务器代理,启用SSL发信, 端口一般是465 11 | smtp_from: 'alert@163.com' #发送邮箱名称 12 | smtp_auth_username: 'alert@163.com' #邮箱名称 13 | smtp_auth_password: 'password' #邮箱密码或授权码 14 | smtp_require_tls: false 15 | templates: 16 | - '/etc/templates/*.tmpl' 17 | route: 18 | receiver: 'default' 19 | group_wait: 10s 20 | group_interval: 1m 21 | repeat_interval: 1h 22 | group_by: ['alertname', 'instance', 'cluster', 'service'] 23 | routes: 24 | - receiver: 'default' 25 | match: 26 | severity: 'warning' 27 | - receiver: 'dingtalk' 28 | match: 29 | severity: 'critical' 30 | inhibit_rules: 31 | - source_match: 32 | severity: 'critical' 33 | target_match: 34 | severity: 'warning' 35 | equal: ['alertname', 'instance', 'cluster', 'service'] 36 | receivers: 37 | - name: 'default' 38 | email_configs: 39 | - to: 'receiver@163.com' 40 | send_resolved: true 41 | - name: 'dingtalk' 42 | webhook_configs: 43 | 
- url: 'http://dingtalk:8060/dingtalk/webhook/send' 44 | send_resolved: true 45 | -------------------------------------------------------------------------------- /prometheus/alertmanager/alertmanager-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: alertmanager 5 | namespace: public-service 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: alertmanager 11 | template: 12 | metadata: 13 | name: alertmanager 14 | labels: 15 | app: alertmanager 16 | spec: 17 | containers: 18 | - name: alertmanager 19 | image: prom/alertmanager:latest 20 | imagePullPolicy: IfNotPresent 21 | ports: 22 | - containerPort: 9093 23 | env: 24 | - name: POD_IP 25 | valueFrom: 26 | fieldRef: 27 | apiVersion: v1 28 | fieldPath: status.podIP 29 | args: 30 | - "--config.file=/etc/alertmanager/config.yml" 31 | - "--storage.path=/alertmanager" 32 | - "--cluster.advertise-address=$(POD_IP):6783" #没有该参数会报错:Failed to get final advertise address 33 | volumeMounts: 34 | - name: config 35 | mountPath: /etc/alertmanager 36 | - name: templates 37 | mountPath: /etc/templates 38 | - name: alertmanager 39 | mountPath: /alertmanager 40 | volumes: 41 | - name: config 42 | configMap: 43 | name: alertmanager-config 44 | - name: templates 45 | configMap: 46 | name: alertmanager-templates 47 | - name: alertmanager 48 | emptyDir: {} 49 | -------------------------------------------------------------------------------- /apollo/README.md: -------------------------------------------------------------------------------- 1 | ### apollo 2 | 3 | - 数据库导入: 4 | 5 | 首先部署数据库,并导入sql 6 | 7 | ```bash 8 | git clone https://github.com/ctripcorp/apollo.git 9 | 10 | mysql -uroot -p123456789 < apollo/scripts/sql/apolloportaldb.sql 11 | 12 | mysql -uroot -p123456789 < apollo/scripts/sql/apolloconfigdb.sql 13 | ``` 14 | 15 | - 部署: 16 | 17 | > ConfigMap中注意修改mysql的ip:port及账号密码; 18 | > 
ConfigMap中注意根据namespace修改url; 19 | > ConfigMap中注意根据部署情况修改env。 20 | 21 | ```bash 22 | kubectl apply -f public-service-ns.yaml 23 | 24 | kubectl apply -f apollo-configservice/ 25 | 26 | kubectl apply -f apollo-adminservice/ 27 | 28 | kubectl apply -f apollo-portal/ 29 | ``` 30 | 31 | ```bash 32 | kubectl get svc -n public-service 33 | 34 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 35 | apollo-adminservice ClusterIP 10.110.239.51 8090/TCP 14s 36 | apollo-configservice ClusterIP 10.103.129.253 8080/TCP 14s 37 | apollo-portal ClusterIP 10.105.237.238 8070/TCP 13s 38 | 39 | kubectl get pod -n public-service 40 | 41 | NAME READY STATUS RESTARTS AGE 42 | apollo-adminservice-765497fbbc-htskm 1/1 Running 0 35s 43 | apollo-configservice-64b4d77457-8w59g 1/1 Running 0 35s 44 | apollo-portal-5f585dc954-th96h 1/1 Running 0 34s 45 | ``` 46 | 47 | - 访问ui: 48 | 49 | 添加hosts:`apollo.lzxlinux.com`,账号/密码:`apollo`/`admin`。 50 | 51 | --- 52 | -------------------------------------------------------------------------------- /gitlab/gitlab-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: gitlab-gitaly 5 | labels: 6 | app: gitlab 7 | component: gitaly 8 | spec: 9 | capacity: 10 | storage: 50Gi 11 | accessModes: 12 | - ReadWriteOnce 13 | persistentVolumeReclaimPolicy: Recycle 14 | nfs: 15 | server: 192.168.30.129 16 | path: /data/gitlab/gitaly 17 | 18 | --- 19 | apiVersion: v1 20 | kind: PersistentVolume 21 | metadata: 22 | name: gitlab-minio 23 | labels: 24 | app: gitlab 25 | component: minio 26 | spec: 27 | capacity: 28 | storage: 10Gi 29 | accessModes: 30 | - ReadWriteOnce 31 | persistentVolumeReclaimPolicy: Recycle 32 | nfs: 33 | server: 192.168.30.129 34 | path: /data/gitlab/minio 35 | 36 | --- 37 | apiVersion: v1 38 | kind: PersistentVolume 39 | metadata: 40 | name: gitlab-postgresql 41 | labels: 42 | app: gitlab 43 | component: postgresql 44 | spec: 45 | capacity: 46 | 
storage: 8Gi 47 | accessModes: 48 | - ReadWriteOnce 49 | persistentVolumeReclaimPolicy: Recycle 50 | nfs: 51 | server: 192.168.30.129 52 | path: /data/gitlab/postgresql 53 | 54 | --- 55 | apiVersion: v1 56 | kind: PersistentVolume 57 | metadata: 58 | name: gitlab-redis 59 | labels: 60 | app: gitlab 61 | component: redis 62 | spec: 63 | capacity: 64 | storage: 8Gi 65 | accessModes: 66 | - ReadWriteOnce 67 | persistentVolumeReclaimPolicy: Recycle 68 | nfs: 69 | server: 192.168.30.129 70 | path: /data/gitlab/redis 71 | -------------------------------------------------------------------------------- /gitlab/shared-secrets/shared-secrets-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: gitlab-shared-secrets 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: shared-secrets 9 | annotations: 10 | "helm.sh/hook": pre-install,pre-upgrade 11 | "helm.sh/hook-weight": "-5" 12 | "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation 13 | 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: Role 17 | metadata: 18 | name: gitlab-shared-secrets 19 | namespace: public-service 20 | labels: 21 | app: gitlab 22 | component: shared-secrets 23 | annotations: 24 | "helm.sh/hook": pre-install,pre-upgrade 25 | "helm.sh/hook-weight": "-5" 26 | "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation 27 | rules: 28 | - apiGroups: [""] 29 | resources: ["secrets"] 30 | verbs: ["get", "list", "create", "patch"] 31 | 32 | --- 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | kind: RoleBinding 35 | metadata: 36 | name: gitlab-shared-secrets 37 | namespace: public-service 38 | labels: 39 | app: gitlab 40 | component: shared-secrets 41 | annotations: 42 | "helm.sh/hook": pre-install,pre-upgrade 43 | "helm.sh/hook-weight": "-5" 44 | "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation 45 | roleRef: 46 | apiGroup: 
rbac.authorization.k8s.io 47 | kind: Role 48 | name: gitlab-shared-secrets 49 | subjects: 50 | - kind: ServiceAccount 51 | name: gitlab-shared-secrets 52 | namespace: public-service 53 | -------------------------------------------------------------------------------- /apollo/apollo-adminservice/apollo-adminservice-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: apollo-adminservice 5 | namespace: public-service 6 | labels: 7 | app: apollo-adminservice 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: apollo-adminservice 13 | template: 14 | metadata: 15 | labels: 16 | app: apollo-adminservice 17 | spec: 18 | containers: 19 | - name: apollo-adminservice 20 | image: apolloconfig/apollo-adminservice:latest 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - name: http 24 | containerPort: 8090 25 | protocol: TCP 26 | env: 27 | - name: SPRING_PROFILES_ACTIVE 28 | value: "github,kubernetes" 29 | resources: 30 | limits: 31 | cpu: "1000m" 32 | memory: "1024Mi" 33 | requests: 34 | cpu: "1000m" 35 | memory: "1024Mi" 36 | volumeMounts: 37 | - name: apollo-adminservice-config 38 | mountPath: /apollo-adminservice/config/application-github.properties 39 | subPath: application-github.properties 40 | volumes: 41 | - name: apollo-adminservice-config 42 | configMap: 43 | name: apollo-adminservice 44 | items: 45 | - key: application-github.properties 46 | path: application-github.properties 47 | defaultMode: 420 48 | -------------------------------------------------------------------------------- /apollo/apollo-configservice/apollo-configservice-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: apollo-configservice 5 | namespace: public-service 6 | labels: 7 | app: apollo-configservice 8 | spec: 9 | replicas: 1 10 | selector: 11 | 
matchLabels: 12 | app: apollo-configservice 13 | template: 14 | metadata: 15 | labels: 16 | app: apollo-configservice 17 | spec: 18 | containers: 19 | - name: apollo-configservice 20 | image: apolloconfig/apollo-configservice:latest 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - name: http 24 | containerPort: 8080 25 | protocol: TCP 26 | env: 27 | - name: SPRING_PROFILES_ACTIVE 28 | value: "github,kubernetes" 29 | resources: 30 | limits: 31 | cpu: "1000m" 32 | memory: "1024Mi" 33 | requests: 34 | cpu: "1000m" 35 | memory: "1024Mi" 36 | volumeMounts: 37 | - name: apollo-configservice-config 38 | mountPath: /apollo-configservice/config/application-github.properties 39 | subPath: application-github.properties 40 | volumes: 41 | - name: apollo-configservice-config 42 | configMap: 43 | name: apollo-configservice 44 | items: 45 | - key: application-github.properties 46 | path: application-github.properties 47 | defaultMode: 420 48 | -------------------------------------------------------------------------------- /elfk+xpack/logstash/logstash.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: logstash 5 | namespace: log 6 | spec: 7 | selector: 8 | app: logstash 9 | ports: 10 | - protocol: TCP 11 | port: 5040 12 | nodePort: 30040 13 | type: NodePort 14 | 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: logstash 20 | namespace: log 21 | spec: 22 | replicas: 3 23 | strategy: 24 | rollingUpdate: 25 | maxSurge: 1 26 | maxUnavailable: 1 27 | selector: 28 | matchLabels: 29 | app: logstash 30 | template: 31 | metadata: 32 | labels: 33 | app: logstash 34 | spec: 35 | containers: 36 | - name: logstash 37 | image: logstash:7.10.1 38 | ports: 39 | - containerPort: 9600 40 | - containerPort: 5040 41 | resources: 42 | limits: 43 | cpu: 300m 44 | memory: 1000Mi 45 | requests: 46 | cpu: 300m 47 | memory: 500Mi 48 | volumeMounts: 49 | - name: config 50 | 
mountPath: /usr/share/logstash/config 51 | - name: pipeline 52 | mountPath: /usr/share/logstash/pipeline 53 | volumes: 54 | - name: config 55 | configMap: 56 | name: logstash-config 57 | items: 58 | - key: logstash.yml 59 | path: logstash.yml 60 | - name: pipeline 61 | configMap: 62 | name: logstash-config 63 | items: 64 | - key: logstash.conf 65 | path: logstash.conf 66 | -------------------------------------------------------------------------------- /redis/redis-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: nfs-pv0 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteMany 10 | nfs: 11 | server: 192.168.30.129 12 | path: /data/redis/cluster0 13 | 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolume 17 | metadata: 18 | name: nfs-pv1 19 | spec: 20 | capacity: 21 | storage: 1Gi 22 | accessModes: 23 | - ReadWriteMany 24 | nfs: 25 | server: 192.168.30.129 26 | path: /data/redis/cluster1 27 | 28 | --- 29 | apiVersion: v1 30 | kind: PersistentVolume 31 | metadata: 32 | name: nfs-pv2 33 | spec: 34 | capacity: 35 | storage: 1Gi 36 | accessModes: 37 | - ReadWriteMany 38 | nfs: 39 | server: 192.168.30.129 40 | path: /data/redis/cluster2 41 | 42 | --- 43 | apiVersion: v1 44 | kind: PersistentVolume 45 | metadata: 46 | name: nfs-pv3 47 | spec: 48 | capacity: 49 | storage: 1Gi 50 | accessModes: 51 | - ReadWriteMany 52 | nfs: 53 | server: 192.168.30.129 54 | path: /data/redis/cluster3 55 | 56 | --- 57 | apiVersion: v1 58 | kind: PersistentVolume 59 | metadata: 60 | name: nfs-pv4 61 | spec: 62 | capacity: 63 | storage: 1Gi 64 | accessModes: 65 | - ReadWriteMany 66 | nfs: 67 | server: 192.168.30.129 68 | path: /data/redis/cluster4 69 | 70 | --- 71 | apiVersion: v1 72 | kind: PersistentVolume 73 | metadata: 74 | name: nfs-pv5 75 | spec: 76 | capacity: 77 | storage: 1Gi 78 | accessModes: 79 | - ReadWriteMany 80 | nfs: 81 | server: 192.168.30.129 
82 | path: /data/redis/cluster5 83 | -------------------------------------------------------------------------------- /elfk+xpack/logstash/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: logstash-config 5 | namespace: log 6 | data: 7 | logstash.yml: | 8 | http.host: "0" 9 | http.port: 9600 10 | path.config: /usr/share/logstash/pipeline 11 | config.reload.automatic: true 12 | xpack.monitoring.enabled: true 13 | xpack.monitoring.elasticsearch.username: logstash_system 14 | xpack.monitoring.elasticsearch.password: elk-2021 15 | xpack.monitoring.elasticsearch.hosts: ["http://elasticsearch:9200"] 16 | xpack.monitoring.collection.interval: 10s 17 | 18 | logstash.conf: | 19 | input { 20 | beats { 21 | port => 5040 22 | } 23 | } 24 | 25 | filter { 26 | if [type] == "nginx_access" { 27 | grok { 28 | match => { "message" => "%{COMBINEDAPACHELOG}" } 29 | } 30 | } 31 | if [type] == "tomcat_access" { 32 | grok { 33 | match => { "message" => "(?%{MONTHDAY}-%{MONTH}-%{YEAR} %{TIME}) %{LOGLEVEL:level} \[%{DATA:exception_info}\] %{GREEDYDATA:message}" } 34 | } 35 | } 36 | } 37 | 38 | output { 39 | if [type] == "nginx_access" { 40 | elasticsearch { 41 | hosts => ["elasticsearch:9200"] 42 | user => "elastic" 43 | password => "elk-2021" 44 | index => "nginx-log" 45 | } 46 | } 47 | if [type] == "tomcat_access" { 48 | elasticsearch { 49 | hosts => ["elasticsearch:9200"] 50 | user => "elastic" 51 | password => "elk-2021" 52 | index => "tomcat-log" 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /weave-scope/manifests/weave-scope-cluster-agent-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: weave-scope-cluster-agent 5 | namespace: weave 6 | labels: 7 | name: weave-scope-cluster-agent 8 | app: weave-scope 9 | 
weave-cloud-component: scope 10 | weave-scope-component: cluster-agent 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | name: weave-scope-cluster-agent 16 | app: weave-scope 17 | weave-cloud-component: scope 18 | weave-scope-component: cluster-agent 19 | template: 20 | metadata: 21 | labels: 22 | name: weave-scope-cluster-agent 23 | app: weave-scope 24 | weave-cloud-component: scope 25 | weave-scope-component: cluster-agent 26 | spec: 27 | serviceAccountName: weave-scope 28 | containers: 29 | - name: scope-cluster-agent 30 | image: docker.io/weaveworks/scope:1.13.1 31 | imagePullPolicy: IfNotPresent 32 | ports: 33 | - containerPort: 4041 34 | protocol: TCP 35 | args: 36 | - '--mode=probe' 37 | - '--probe-only' 38 | - '--probe.kubernetes.role=cluster' 39 | - '--probe.http.listen=:4041' 40 | - '--probe.publish.interval=4500ms' 41 | - '--probe.spy.interval=2s' 42 | - 'weave-scope-app.weave.svc.cluster.local:80' 43 | command: 44 | - /home/weave/scope 45 | env: [] 46 | resources: 47 | limits: 48 | memory: 2000Mi 49 | requests: 50 | cpu: 25m 51 | memory: 80Mi 52 | -------------------------------------------------------------------------------- /prometheus/prometheus/prometheus-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: public-service 6 | labels: 7 | app: prometheus 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: prometheus 13 | template: 14 | metadata: 15 | name: prometheus 16 | labels: 17 | app: prometheus 18 | spec: 19 | serviceAccountName: prometheus 20 | containers: 21 | - name: prometheus 22 | image: prom/prometheus:latest 23 | imagePullPolicy: IfNotPresent 24 | args: 25 | - '--storage.tsdb.path=/prometheus' 26 | - '--storage.tsdb.retention.time=30d' 27 | - '--config.file=/etc/prometheus/prometheus.yml' 28 | ports: 29 | - containerPort: 9090 30 | resources: 31 | requests: 32 | cpu: 
500m 33 | memory: 500M 34 | limits: 35 | cpu: 500m 36 | memory: 500M 37 | volumeMounts: 38 | - name: config 39 | mountPath: /etc/prometheus 40 | - name: rules 41 | mountPath: /etc/prometheus-rules 42 | - name: prometheus 43 | mountPath: /prometheus 44 | volumes: 45 | - name: config 46 | configMap: 47 | name: prometheus-config 48 | - name: rules 49 | configMap: 50 | name: prometheus-rules 51 | - name: prometheus 52 | emptyDir: {} 53 | nodeSelector: 54 | node-role.kubernetes.io/master: "" 55 | tolerations: 56 | - key: node-role.kubernetes.io/master 57 | operator: Exists 58 | effect: NoSchedule 59 | -------------------------------------------------------------------------------- /elfk+xpack/alicloud-nas-elfk-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: alicloud-nas-elfk-pv0 5 | labels: 6 | alicloud-pvname: alicloud-nas-elfk-pv 7 | spec: 8 | capacity: 9 | storage: 500Gi 10 | accessModes: 11 | - ReadWriteMany 12 | csi: 13 | driver: nasplugin.csi.alibabacloud.com 14 | volumeHandle: alicloud-nas-elfk-pv0 15 | volumeAttributes: 16 | server: "xxx.cn-hangzhou.nas.aliyuncs.com" 17 | path: "/elfk-data/elasticsearch-0" 18 | mountOptions: 19 | - nolock,tcp,noresvport 20 | - vers=4 21 | 22 | --- 23 | apiVersion: v1 24 | kind: PersistentVolume 25 | metadata: 26 | name: alicloud-nas-elfk-pv1 27 | labels: 28 | alicloud-pvname: alicloud-nas-elfk-pv 29 | spec: 30 | capacity: 31 | storage: 500Gi 32 | accessModes: 33 | - ReadWriteMany 34 | csi: 35 | driver: nasplugin.csi.alibabacloud.com 36 | volumeHandle: alicloud-nas-elfk-pv1 37 | volumeAttributes: 38 | server: "xxx.cn-hangzhou.nas.aliyuncs.com" 39 | path: "/elfk-data/elasticsearch-1" 40 | mountOptions: 41 | - nolock,tcp,noresvport 42 | - vers=4 43 | 44 | --- 45 | apiVersion: v1 46 | kind: PersistentVolume 47 | metadata: 48 | name: alicloud-nas-elfk-pv2 49 | labels: 50 | alicloud-pvname: alicloud-nas-elfk-pv 51 | spec: 52 
| capacity: 53 | storage: 500Gi 54 | accessModes: 55 | - ReadWriteMany 56 | csi: 57 | driver: nasplugin.csi.alibabacloud.com 58 | volumeHandle: alicloud-nas-elfk-pv2 59 | volumeAttributes: 60 | server: "xxx.cn-hangzhou.nas.aliyuncs.com" 61 | path: "/elfk-data/elasticsearch-2" 62 | mountOptions: 63 | - nolock,tcp,noresvport 64 | - vers=4 65 | -------------------------------------------------------------------------------- /harbor/harbor/templates/registry/registry-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: "{{ template "harbor.registry" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | REGISTRY_HTPASSWD: {{ .Values.registry.credentials.htpasswd | b64enc | quote }} 10 | REGISTRY_HTTP_SECRET: {{ .Values.registry.secret | default (randAlphaNum 16) | b64enc | quote }} 11 | REGISTRY_REDIS_PASSWORD: {{ (include "harbor.redis.password" .) | b64enc | quote }} 12 | {{- $storage := .Values.persistence.imageChartStorage }} 13 | {{- $type := $storage.type }} 14 | {{- if eq $type "azure" }} 15 | REGISTRY_STORAGE_AZURE_ACCOUNTKEY: {{ $storage.azure.accountkey | b64enc | quote }} 16 | {{- else if eq $type "gcs" }} 17 | GCS_KEY_DATA: {{ $storage.gcs.encodedkey | quote }} 18 | {{- else if eq $type "s3" }} 19 | {{- if $storage.s3.accesskey }} 20 | REGISTRY_STORAGE_S3_ACCESSKEY: {{ $storage.s3.accesskey | b64enc | quote }} 21 | {{- end }} 22 | {{- if $storage.s3.secretkey }} 23 | REGISTRY_STORAGE_S3_SECRETKEY: {{ $storage.s3.secretkey | b64enc | quote }} 24 | {{- end }} 25 | {{- else if eq $type "swift" }} 26 | REGISTRY_STORAGE_SWIFT_PASSWORD: {{ $storage.swift.password | b64enc | quote }} 27 | {{- if $storage.swift.secretkey }} 28 | REGISTRY_STORAGE_SWIFT_SECRETKEY: {{ $storage.swift.secretkey | b64enc | quote }} 29 | {{- end }} 30 | {{- if $storage.swift.accesskey }} 31 | REGISTRY_STORAGE_SWIFT_ACCESSKEY: {{ 
$storage.swift.accesskey | b64enc | quote }} 32 | {{- end }} 33 | {{- else if eq $type "oss" }} 34 | REGISTRY_STORAGE_OSS_ACCESSKEYSECRET: {{ $storage.oss.accesskeysecret | b64enc | quote }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /elfk/filebeat/filebeat-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: filebeat-config-nginx 5 | namespace: default 6 | labels: 7 | app: filebeat 8 | data: 9 | filebeat.yml: |- 10 | filebeat.config: 11 | inputs: 12 | path: ${path.config}/inputs.d/*.yml 13 | reload.enabled: false 14 | 15 | modules: 16 | path: ${path.config}/modules.d/*.yml 17 | reload.enabled: false 18 | filebeat.inputs: 19 | - type: log 20 | paths: 21 | - /logdata/*.log 22 | tail_files: true 23 | fields: 24 | pod_name: '${pod_name}' 25 | POD_IP: '${POD_IP}' 26 | setup.template.name: "nginx-logs" 27 | setup.template.pattern: "nginx-logs-*" 28 | 29 | output.logstash: 30 | hosts: ["10.96.103.207:5044"] #指定logstash ClusterIP及端口 31 | index: "nginx-logs" 32 | 33 | --- 34 | apiVersion: v1 35 | kind: ConfigMap 36 | metadata: 37 | name: filebeat-config-tomcat 38 | namespace: default 39 | labels: 40 | app: filebeat 41 | data: 42 | filebeat.yml: |- 43 | filebeat.config: 44 | inputs: 45 | path: ${path.config}/inputs.d/*.yml 46 | reload.enabled: false 47 | 48 | modules: 49 | path: ${path.config}/modules.d/*.yml 50 | reload.enabled: false 51 | filebeat.inputs: 52 | - type: log 53 | paths: 54 | - /logdata/*.log 55 | tail_files: true 56 | fields: 57 | pod_name: '${pod_name}' 58 | POD_IP: '${POD_IP}' 59 | setup.template.name: "tomcat-logs" 60 | setup.template.pattern: "tomcat-logs-*" 61 | 62 | output.logstash: 63 | hosts: ["10.96.103.207:5044"] 64 | index: "tomcat-logs" 65 | -------------------------------------------------------------------------------- /gitlab/upgrade-check/upgrade-check-job.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: gitlab-upgrade-check 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: gitlab 9 | annotations: 10 | "helm.sh/hook": pre-upgrade 11 | "helm.sh/hook-weight": "-10" 12 | "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation 13 | spec: 14 | backoffLimit: 2 15 | template: 16 | metadata: 17 | labels: 18 | app: gitlab 19 | component: gitlab 20 | spec: 21 | containers: 22 | - name: run-check 23 | image: busybox:latest 24 | imagePullPolicy: IfNotPresent 25 | command: ['/bin/sh', '/scripts/runcheck'] 26 | env: 27 | - name: GITLAB_VERSION 28 | value: '13.4.3' 29 | - name: CHART_VERSION 30 | value: '4.4.3' 31 | resources: 32 | requests: 33 | cpu: 50m 34 | volumeMounts: 35 | - name: chart-info 36 | mountPath: /chart-info 37 | - name: scripts 38 | mountPath: /scripts 39 | - name: postgresql-secret 40 | mountPath: /etc/secrets/postgresql 41 | securityContext: 42 | runAsUser: 65534 43 | fsGroup: 65534 44 | restartPolicy: Never 45 | volumes: 46 | - name: chart-info 47 | configMap: 48 | name: gitlab-chart-info 49 | optional: true 50 | - name: scripts 51 | configMap: 52 | name: gitlab-upgrade-check 53 | - name: postgresql-secret 54 | secret: 55 | secretName: "gitlab-postgresql-password" 56 | optional: true 57 | -------------------------------------------------------------------------------- /harbor/harbor/templates/jobservice/jobservice-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: "{{ template "harbor.jobservice" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | data: 8 | config.yml: |+ 9 | #Server listening port 10 | protocol: "{{ template "harbor.component.scheme" . }}" 11 | port: {{ template "harbor.jobservice.containerPort". 
}} 12 | {{- if .Values.internalTLS.enabled }} 13 | https_config: 14 | cert: "/etc/harbor/ssl/jobservice/tls.crt" 15 | key: "/etc/harbor/ssl/jobservice/tls.key" 16 | {{- end }} 17 | worker_pool: 18 | workers: {{ .Values.jobservice.maxJobWorkers }} 19 | backend: "redis" 20 | redis_pool: 21 | redis_url: "{{ template "harbor.redis.urlForJobservice" . }}" 22 | namespace: "harbor_job_service_namespace" 23 | idle_timeout_second: 3600 24 | job_loggers: 25 | {{- if eq .Values.jobservice.jobLogger "file" }} 26 | - name: "FILE" 27 | level: {{ .Values.logLevel | upper }} 28 | settings: # Customized settings of logger 29 | base_dir: "/var/log/jobs" 30 | sweeper: 31 | duration: 14 #days 32 | settings: # Customized settings of sweeper 33 | work_dir: "/var/log/jobs" 34 | {{- else if eq .Values.jobservice.jobLogger "database" }} 35 | - name: "DB" 36 | level: {{ .Values.logLevel | upper }} 37 | sweeper: 38 | duration: 14 #days 39 | {{- else }} 40 | - name: "STD_OUTPUT" 41 | level: {{ .Values.logLevel | upper }} 42 | {{- end }} 43 | #Loggers for the job service 44 | loggers: 45 | - name: "STD_OUTPUT" 46 | level: {{ .Values.logLevel | upper }} -------------------------------------------------------------------------------- /apollo/apollo-portal/apollo-portal-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: apollo-portal 5 | namespace: public-service 6 | labels: 7 | app: apollo-portal 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: apollo-portal 13 | template: 14 | metadata: 15 | labels: 16 | app: apollo-portal 17 | spec: 18 | containers: 19 | - name: apollo-portal 20 | image: apolloconfig/apollo-portal:latest 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - name: http 24 | containerPort: 8070 25 | protocol: TCP 26 | env: 27 | - name: SPRING_PROFILES_ACTIVE 28 | value: "github,auth" 29 | resources: 30 | limits: 31 | cpu: "1000m" 32 | memory: 
"1024Mi" 33 | requests: 34 | cpu: "1000m" 35 | memory: "1024Mi" 36 | volumeMounts: 37 | - name: apollo-portal-config 38 | mountPath: /apollo-portal/config/application-github.properties 39 | subPath: application-github.properties 40 | - name: apollo-portal-config 41 | mountPath: /apollo-portal/config/apollo-env.properties 42 | subPath: apollo-env.properties 43 | volumes: 44 | - name: apollo-portal-config 45 | configMap: 46 | name: apollo-portal 47 | items: 48 | - key: application-github.properties 49 | path: application-github.properties 50 | - key: apollo-env.properties 51 | path: apollo-env.properties 52 | defaultMode: 420 53 | -------------------------------------------------------------------------------- /gitlab/gitaly/gitaly-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-gitaly 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: registry 9 | data: 10 | configure: | 11 | set -e 12 | mkdir -p /init-secrets/gitaly /init-secrets/shell 13 | cp -v -r -L /init-config/.gitlab_shell_secret /init-secrets/shell/.gitlab_shell_secret 14 | cp -v -r -L /init-config/gitaly_token /init-secrets/gitaly/gitaly_token 15 | 16 | config.toml.erb: | 17 | bin_dir = "/usr/local/bin" 18 | listen_addr = "0.0.0.0:8075" 19 | internal_socket_dir = "/home/git" 20 | prometheus_listen_addr = "localhost:9236" 21 | 22 | <% @storages = [ "default", ] %> 23 | <% @index=`echo ${HOSTNAME##*-}`.to_i %> 24 | <% if @storages.length > @index %> 25 | [[storage]] 26 | name = "<%= @storages[@index] %>" 27 | path = "/home/git/repositories" 28 | <% else %> 29 | <% raise Exception, "Storage for node #{@index} is not present in the storageNames array. Did you use kubectl to scale up ? 
You need to solely use helm for this purpose" %> 30 | <% end %> 31 | 32 | [logging] 33 | format = "json" 34 | dir = "/var/log/gitaly" 35 | 36 | [auth] 37 | token = "<%= File.read('/etc/gitlab-secrets/gitaly/gitaly_token').strip.dump[1..-2] %>" 38 | 39 | [git] 40 | 41 | [gitaly-ruby] 42 | dir = "/srv/gitaly-ruby" 43 | rugged_git_config_search_path = "/usr/local/etc" 44 | 45 | [gitlab-shell] 46 | dir = "/srv/gitlab-shell" 47 | 48 | [gitlab] 49 | secret_file = "/etc/gitlab-secrets/shell/.gitlab_shell_secret" 50 | url = "http://gitlab-webservice:8181/" 51 | 52 | [gitlab.http-settings] 53 | self_signed_cert = false 54 | 55 | [hooks] 56 | custom_hooks_dir = "/home/git/custom_hooks" 57 | -------------------------------------------------------------------------------- /harbor/harbor/cert/tls.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE0zCCArugAwIBAgIJAPY/OzLMeVq2MA0GCSqGSIb3DQEBCwUAMAAwHhcNMTkw 3 | NDE4MDIyNzM3WhcNMjkwNDE1MDIyNzM3WjAAMIICIjANBgkqhkiG9w0BAQEFAAOC 4 | Ag8AMIICCgKCAgEA3xlUJs2b/aI2NLoy4OIQ+dn/yMb/O99iKDRyZKpH8rSOmS+o 5 | F9unmSAzL65XA/v6nY0OLI/dASDjkqkBpIdTGzogR5f8UiB6osuEY7V71XZdzWLr 6 | PjnJq6ZLAaoKmwG80W5+Wd6V8PygOx52mkr1w7IWKz+1ZLI5izbppon7XVGVRaAT 7 | RvNZDiJ6CeJpcJ5H723lkf5RvJWatZLCYIYDbRfTiKsyQ/SlRcv5BVfHg/LJSH9Q 8 | LGRhPMARldl9wyZCwZZDHxheI4a+26aa8MY3u9st/l0/Oo6VCTGpMiEhiGF2LVjp 9 | UWq/+BP4SFEvJfq/DuinI139W/5aZZ7/HwRPlmYU6pXTRLyIg7jd+19fJwR7X37q 10 | w0o8t06FhjmrCzaYCUjoReqDmHaNmZN/ddvG7jZWBu+jNh0YavsyQyCIVmv6yqSc 11 | jPiD9uivxqTwjJidIBRfuUrz3aERQ7cQgf0qhqjIzflzHbFKhILocBWq7zyNl9hr 12 | vUGT/WZcw0t/OtM72SPaplmTgVbbQRxf2VHzyptGIvtydlXK8thxOMpXo4e+Sl8d 13 | 1gdQcC4oisN9F29oNs8P5yFQP//xYuv8C607nCj1DzrId5avG/NVfKB/fbDKEFgN 14 | 2WhHInTzPLEcjF4fErcUAEuWW0buX/6FHCG3iTtrqyD92KTVDfN1J56rrcsCAwEA 15 | AaNQME4wHQYDVR0OBBYEFFhNhTo4UAC2PUsf8jYaWj160vGEMB8GA1UdIwQYMBaA 16 | FFhNhTo4UAC2PUsf8jYaWj160vGEMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL 17 | 
BQADggIBAMAsEtVlELMwdtcifHeOT0kOmf5wo9In/eFSgscCzBMDaRx2B3q36AoS 18 | Il7XWAZpevaR7W7yeARKaAshBLhygUqLD0zWbKlSN9Hprd1wdpM0ffyPpN5dxOYA 19 | er04y12GRnCbMYqi4cvztP4TinXqq2yHSYhLbO9qkI5gbWVxkRuIcMKvixddllNY 20 | Q3obJaDDHmovM3+g/G+1YFgt4qES38XnJ7BrSshHnn5EIQh286xfJriyrK2hHbLJ 21 | qz0YuF6G3DXPeWGgXvj0Hipc0f8UDZkKkk/eGEI6vEkytyvoepoZI2XbAf/ZMy5n 22 | KwuhEn4hhkFMwWaSWp/h0QdMCaxk4BVSOqmNVaLSB7+FjsIj4CasFotYiyJ2gpRB 23 | Nf8QaS4bz0Tn1eBbC8ksj+e3ZWeX2b5wVMjql9jTt2X1ICs8KKe3vEBkjqT2AUi2 24 | 52TtKzm73aWrz/GPy/Q2LCor3Fh9FGVSBOBBDXGy6MJpNHJnYVH9EENFGOh85ol1 25 | 2pADOBB5vAU/kLB5LHPj2kue/FMiHaNnrSYIGrMlBSX2jj9EYa1uuUH+pd4MBj1F 26 | 5uH8ORiaQ6ht2+WHklxic1Rj5yTYQwVlH70CBOn+qVEdo63yQwzAMJKFIwlGUQEX 27 | jiljgc86q4cZtUTFrcwMidbk+8Q6+JbDVg7HV/+pnC+wnv197kwe 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /zabbix/zabbix-web/zabbix-web.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: zabbix-web 5 | namespace: monitoring 6 | spec: 7 | rules: 8 | - host: zabbix.lzxlinux.cn 9 | http: 10 | paths: 11 | - path: / 12 | backend: 13 | serviceName: zabbix-web 14 | servicePort: 8080 15 | 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: zabbix-web 21 | namespace: monitoring 22 | labels: 23 | app: zabbix-web 24 | spec: 25 | selector: 26 | app: zabbix-web 27 | ports: 28 | - name: web 29 | port: 8080 30 | protocol: TCP 31 | 32 | --- 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | metadata: 36 | name: zabbix-web 37 | namespace: monitoring 38 | spec: 39 | replicas: 1 40 | selector: 41 | matchLabels: 42 | app: zabbix-web 43 | template: 44 | metadata: 45 | labels: 46 | app: zabbix-web 47 | spec: 48 | containers: 49 | - name: zabbix-web 50 | image: zabbix/zabbix-web-nginx-mysql:latest 51 | imagePullPolicy: IfNotPresent 52 | env: 53 | - name: DB_SERVER_HOST 54 | value: mysql 55 | - name: 
MYSQL_DATABASE 56 | value: zabbix 57 | - name: MYSQL_USER 58 | value: zabbix 59 | - name: MYSQL_PASSWORD 60 | value: zabbix 61 | - name: MYSQL_ROOT_PASSWORD 62 | value: zabbix 63 | - name: ZBX_SERVER_HOST 64 | value: zabbix-server 65 | - name: PHP_TZ 66 | value: "Asia/Shanghai" 67 | ports: 68 | - containerPort: 8080 69 | name: web 70 | protocol: TCP 71 | resources: 72 | requests: 73 | cpu: 500m 74 | memory: 500Mi 75 | limits: 76 | cpu: 500m 77 | memory: 500Mi 78 | -------------------------------------------------------------------------------- /redis/redis-sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: redis 5 | namespace: public-service 6 | spec: 7 | serviceName: redis 8 | replicas: 6 9 | template: 10 | metadata: 11 | labels: 12 | app: redis 13 | appCluster: redis-cluster 14 | spec: 15 | terminationGracePeriodSeconds: 20 16 | affinity: 17 | podAntiAffinity: 18 | preferredDuringSchedulingIgnoredDuringExecution: 19 | - weight: 100 20 | podAffinityTerm: 21 | labelSelector: 22 | matchExpressions: 23 | - key: app 24 | operator: In 25 | values: 26 | - redis 27 | topologyKey: kubernetes.io/hostname 28 | containers: 29 | - name: redis 30 | image: redis:latest 31 | command: 32 | - "redis-server" 33 | args: 34 | - "/etc/redis/redis.conf" 35 | - "--protected-mode" 36 | - "no" 37 | resources: 38 | requests: 39 | cpu: "500m" 40 | memory: "500Mi" 41 | ports: 42 | - containerPort: 6379 43 | name: redis 44 | protocol: TCP 45 | - containerPort: 16379 46 | name: cluster 47 | protocol: TCP 48 | volumeMounts: 49 | - name: conf 50 | mountPath: /etc/redis 51 | - name: data 52 | mountPath: /var/lib/redis 53 | volumes: 54 | - name: conf 55 | configMap: 56 | name: redis-conf 57 | items: 58 | - key: redis.conf 59 | path: redis.conf 60 | volumeClaimTemplates: 61 | - metadata: 62 | name: data 63 | namespace: public-service 64 | spec: 65 | accessModes: [ "ReadWriteMany" ] 66 | 
resources: 67 | requests: 68 | storage: 1Gi 69 | -------------------------------------------------------------------------------- /prometheus/grafana/grafana-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: grafana 5 | namespace: public-service 6 | labels: 7 | app: grafana 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | template: 14 | metadata: 15 | labels: 16 | app: grafana 17 | spec: 18 | containers: 19 | - name: grafana 20 | image: grafana/grafana:latest 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - containerPort: 3000 24 | name: grafana 25 | resources: 26 | limits: 27 | cpu: 100m 28 | memory: 100Mi 29 | requests: 30 | cpu: 100m 31 | memory: 100Mi 32 | env: 33 | - name: GF_AUTH_BASIC_ENABLED 34 | value: "true" 35 | - name: GF_AUTH_ANONYMOUS_ENABLED 36 | value: "false" 37 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 38 | value: Admin 39 | - name: GF_DASHBOARDS_JSON_ENABLED 40 | value: "true" 41 | - name: GF_INSTALL_PLUGINS 42 | value: grafana-kubernetes-app #安装grafana-kubernetes-app插件 43 | - name: GF_SECURITY_ADMIN_USER 44 | valueFrom: 45 | secretKeyRef: 46 | name: grafana 47 | key: admin-username 48 | - name: GF_SECURITY_ADMIN_PASSWORD 49 | valueFrom: 50 | secretKeyRef: 51 | name: grafana 52 | key: admin-password 53 | readinessProbe: 54 | httpGet: 55 | path: /login 56 | port: 3000 57 | initialDelaySeconds: 10 58 | timeoutSeconds: 5 59 | volumeMounts: 60 | - name: grafana-storage 61 | mountPath: /var/lib/grafana 62 | volumes: 63 | - name: grafana-storage 64 | emptyDir: {} 65 | -------------------------------------------------------------------------------- /elfk/filebeat/nginx-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | namespace: default 6 | spec: 7 | replicas: 1 8 | minReadySeconds: 15 9 | 
strategy: 10 | rollingUpdate: 11 | maxSurge: 1 12 | maxUnavailable: 1 13 | selector: 14 | matchLabels: 15 | app: nginx 16 | template: 17 | metadata: 18 | labels: 19 | app: nginx 20 | spec: 21 | terminationGracePeriodSeconds: 30 22 | containers: 23 | - name: nginx 24 | image: nginx:latest 25 | ports: 26 | - containerPort: 80 27 | volumeMounts: 28 | - name: logdata 29 | mountPath: /var/log/nginx 30 | - name: filebeat 31 | image: docker.elastic.co/beats/filebeat-oss:7.6.2 32 | args: [ 33 | "-c", "/etc/filebeat/filebeat.yml", 34 | "-e", 35 | ] 36 | env: 37 | - name: POD_IP 38 | valueFrom: 39 | fieldRef: 40 | apiVersion: v1 41 | fieldPath: status.podIP 42 | - name: pod_name 43 | valueFrom: 44 | fieldRef: 45 | apiVersion: v1 46 | fieldPath: metadata.name 47 | securityContext: 48 | runAsUser: 0 49 | resources: 50 | limits: 51 | memory: 200Mi 52 | requests: 53 | cpu: 200m 54 | memory: 200Mi 55 | volumeMounts: 56 | - name: config 57 | mountPath: /etc/filebeat/ 58 | - name: data 59 | mountPath: /usr/share/filebeat/data 60 | - name: logdata 61 | mountPath: /logdata 62 | volumes: 63 | - name: data 64 | emptyDir: {} 65 | - name: logdata 66 | emptyDir: {} 67 | - name: config 68 | configMap: 69 | name: filebeat-config-nginx 70 | items: 71 | - key: filebeat.yml 72 | path: filebeat.yml 73 | -------------------------------------------------------------------------------- /elfk/filebeat/tomcat-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: tomcat 5 | namespace: default 6 | spec: 7 | replicas: 1 8 | minReadySeconds: 15 9 | strategy: 10 | rollingUpdate: 11 | maxSurge: 1 12 | maxUnavailable: 1 13 | selector: 14 | matchLabels: 15 | app: tomcat 16 | template: 17 | metadata: 18 | labels: 19 | app: tomcat 20 | spec: 21 | terminationGracePeriodSeconds: 30 22 | containers: 23 | - name: tomcat 24 | image: tomcat:latest 25 | ports: 26 | - containerPort: 8080 27 | volumeMounts: 28 
| - name: logdata 29 | mountPath: /usr/local/tomcat/logs 30 | - name: filebeat 31 | image: docker.elastic.co/beats/filebeat-oss:7.6.2 32 | args: [ 33 | "-c", "/etc/filebeat/filebeat.yml", 34 | "-e", 35 | ] 36 | env: 37 | - name: POD_IP 38 | valueFrom: 39 | fieldRef: 40 | apiVersion: v1 41 | fieldPath: status.podIP 42 | - name: pod_name 43 | valueFrom: 44 | fieldRef: 45 | apiVersion: v1 46 | fieldPath: metadata.name 47 | securityContext: 48 | runAsUser: 0 49 | resources: 50 | limits: 51 | memory: 200Mi 52 | requests: 53 | cpu: 200m 54 | memory: 200Mi 55 | volumeMounts: 56 | - name: config 57 | mountPath: /etc/filebeat/ 58 | - name: data 59 | mountPath: /usr/share/filebeat/data 60 | - name: logdata 61 | mountPath: /logdata 62 | volumes: 63 | - name: data 64 | emptyDir: {} 65 | - name: logdata 66 | emptyDir: {} 67 | - name: config 68 | configMap: 69 | name: filebeat-config-tomcat 70 | items: 71 | - key: filebeat.yml 72 | path: filebeat.yml 73 | -------------------------------------------------------------------------------- /harbor/harbor/.github/workflows/integration.yaml: -------------------------------------------------------------------------------- 1 | name: Integration test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | 7 | jobs: 8 | integration-test: 9 | runs-on: ubuntu-latest 10 | strategy: 11 | matrix: 12 | k8s_version: [v1.18.2, v1.17.5, v1.16.9] 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v2 16 | 17 | - name: Setup Docker 18 | uses: docker-practice/actions-setup-docker@0.0.1 19 | with: 20 | docker_version: 18.09 21 | docker_channel: stable 22 | docker_daemon_json: '{"insecure-registries":["0.0.0.0/0"]}' 23 | 24 | - name: Create kind cluster 25 | uses: helm/kind-action@v1.0.0-rc.1 26 | with: 27 | version: v0.8.1 28 | node_image: kindest/node:${{ matrix.k8s_version }} 29 | cluster_name: kind-cluster-${{ matrix.k8s_version }} 30 | config: test/integration/kind-cluster.yaml 31 | 32 | - name: Install Nginx ingress controller 33 | run: | 34 | 
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/ingress-nginx-2.3.0/deploy/static/provider/kind/deploy.yaml 35 | kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=120s 36 | 37 | - name: Set up Go 1.13 38 | uses: actions/setup-go@v2 39 | with: 40 | go-version: 1.13 41 | 42 | - name: Cache go mod 43 | uses: actions/cache@v2 44 | with: 45 | path: ~/go/pkg/mod 46 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 47 | restore-keys: | 48 | ${{ runner.os }}-go- 49 | 50 | - name: Set /etc/hosts 51 | run: | 52 | sudo -- sh -c "echo '127.0.0.1 harbor.local' >> /etc/hosts" 53 | sudo -- sh -c "echo '127.0.0.1 notary.harbor.local' >> /etc/hosts" 54 | 55 | - name: Run integration tests 56 | working-directory: ./test 57 | run: 58 | go test -v -timeout 30m github.com/goharbor/harbor-helm/integration -------------------------------------------------------------------------------- /harbor/harbor/templates/portal/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: "{{ template "harbor.portal" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | data: 8 | nginx.conf: |+ 9 | worker_processes auto; 10 | pid /tmp/nginx.pid; 11 | events { 12 | worker_connections 1024; 13 | } 14 | http { 15 | client_body_temp_path /tmp/client_body_temp; 16 | proxy_temp_path /tmp/proxy_temp; 17 | fastcgi_temp_path /tmp/fastcgi_temp; 18 | uwsgi_temp_path /tmp/uwsgi_temp; 19 | scgi_temp_path /tmp/scgi_temp; 20 | server { 21 | {{- if .Values.internalTLS.enabled }} 22 | listen {{ template "harbor.portal.containerPort" . 
}} ssl; 23 | # SSL 24 | ssl_certificate /etc/harbor/ssl/portal/tls.crt; 25 | ssl_certificate_key /etc/harbor/ssl/portal/tls.key; 26 | 27 | # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html 28 | ssl_protocols TLSv1.2; 29 | ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; 30 | ssl_prefer_server_ciphers on; 31 | ssl_session_cache shared:SSL:10m; 32 | {{- else }} 33 | listen {{ template "harbor.portal.containerPort" . }}; 34 | {{- end }} 35 | server_name localhost; 36 | root /usr/share/nginx/html; 37 | index index.html index.htm; 38 | include /etc/nginx/mime.types; 39 | gzip on; 40 | gzip_min_length 1000; 41 | gzip_proxied expired no-cache no-store private auth; 42 | gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript; 43 | location / { 44 | try_files $uri $uri/ /index.html; 45 | } 46 | location = /index.html { 47 | add_header Cache-Control "no-store, no-cache, must-revalidate"; 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /elfk+xpack/filebeat/filebeat-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx 5 | namespace: default 6 | labels: 7 | app: nginx 8 | spec: 9 | selector: 10 | app: nginx 11 | ports: 12 | - port: 80 13 | nodePort: 30080 14 | type: NodePort 15 | 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: nginx 21 | namespace: default 22 | spec: 23 | replicas: 1 24 | minReadySeconds: 15 25 | selector: 26 | matchLabels: 27 | app: nginx 28 | template: 29 | metadata: 30 | labels: 31 | app: nginx 32 | spec: 33 | containers: 34 | - name: nginx 35 | image: nginx:1.17.0 36 | ports: 37 | - containerPort: 80 38 | volumeMounts: 39 | - name: nginx-log 40 | mountPath: /var/log/nginx 41 | - name: 
filebeat
        image: elastic/filebeat:7.10.1
        args: [
          "-c", "/etc/filebeat/filebeat.yml",
          "-e",
        ]
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.podIP
        - name: pod_name
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: nginx-log
          mountPath: /nginxlog
      volumes:
      - name: config
        configMap:
          name: filebeat-config
          items:
          - key: filebeat.yml
            path: filebeat.yml
      - name: data
        emptyDir: {}
      - name: nginx-log
        emptyDir: {}
-------------------------------------------------------------------------------- /zabbix/mysql/mysql.yaml: --------------------------------------------------------------------------------
# MySQL backing store for Zabbix: a headless Service plus a single-replica
# StatefulSet whose data PVC binds to the pre-created nfs-mysql-pv.
apiVersion: v1
kind: Service
metadata:
  name: mysql
  namespace: monitoring
  labels:
    app: mysql
spec:
  selector:
    app: mysql
  ports:
  - name: mysql
    port: 3306
    protocol: TCP
    targetPort: 3306
  # Headless service: gives the StatefulSet pod a stable DNS identity.
  clusterIP: None

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: monitoring
spec:
  serviceName: mysql
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.7.28
        imagePullPolicy: IfNotPresent
        args:
        # NOTE(review): MySQL's `utf8` is the 3-byte charset; consider
        # utf8mb4 for full Unicode — left unchanged to preserve behavior.
        - "--character-set-server=utf8"
        - "--collation-server=utf8_bin"
        - "--default-authentication-plugin=mysql_native_password"
        env:
        # NOTE(review): credentials are plain text in the manifest; move
        # them into a Secret (secretKeyRef) before use outside a lab.
        - name: MYSQL_DATABASE
          value: "zabbix"
        - name: MYSQL_USER
          value: "zabbix"
        - name: MYSQL_PASSWORD
          value: "zabbix"
        - name: MYSQL_ROOT_PASSWORD
          value: "zabbix"
        ports:
        - containerPort: 3306
          name: mysql
          protocol: TCP
        resources:
          requests:
            cpu: 1000m
            memory: 1000Mi
          limits:
            cpu: 2000m
            memory: 2000Mi
        volumeMounts:
        - name: timezone
          mountPath: /etc/localtime
          readOnly: true  # fix: host zoneinfo only needs read access
        - name: data
          mountPath: /var/lib/mysql
      terminationGracePeriodSeconds: 20
      volumes:
      - name: timezone
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: data
      # fix: removed invalid `namespace:` here — PVCs created from a
      # volumeClaimTemplate are always created in the StatefulSet's
      # own namespace; the field is not honored in this position.
    spec:
      # Bind only to the pre-provisioned NFS PV labeled below.
      selector:
        matchLabels:
          pvname: nfs-mysql-pv
      accessModes: [ "ReadWriteMany" ]
      resources:
        requests:
          storage: 20Gi
-------------------------------------------------------------------------------- /elfk+xpack/filebeat/filebeat-tomcat.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: tomcat
  namespace: default
  labels:
    app: tomcat
spec:
  selector:
    app: tomcat
  ports:
  - port: 8080
    nodePort: 30880
  type: NodePort

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat
  namespace: default
spec:
  replicas: 1
  minReadySeconds: 15
  selector:
    matchLabels:
      app: tomcat
  template:
    metadata:
      labels:
        app: tomcat
    spec:
      terminationGracePeriodSeconds: 30
      containers:
      - name: tomcat
        image: tomcat:8.0.51-alpine
        ports:
        - containerPort: 8080
        volumeMounts:
        - name: tomcat-log
          mountPath: /usr/local/tomcat/logs
      - name: filebeat
        image: elastic/filebeat:7.10.1
        args: [
          "-c", "/etc/filebeat/filebeat.yml",
          "-e",
        ]
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.podIP
- name: pod_name 55 | valueFrom: 56 | fieldRef: 57 | apiVersion: v1 58 | fieldPath: metadata.name 59 | resources: 60 | limits: 61 | cpu: 100m 62 | memory: 100Mi 63 | requests: 64 | cpu: 100m 65 | memory: 100Mi 66 | securityContext: 67 | runAsUser: 0 68 | volumeMounts: 69 | - name: config 70 | mountPath: /etc/filebeat 71 | readOnly: true 72 | - name: data 73 | mountPath: /usr/share/filebeat/data 74 | - name: tomcat-log 75 | mountPath: /tomcatlog 76 | volumes: 77 | - name: config 78 | configMap: 79 | name: filebeat-config 80 | items: 81 | - key: filebeat.yml 82 | path: filebeat.yml 83 | - name: data 84 | emptyDir: {} 85 | - name: tomcat-log 86 | emptyDir: {} 87 | -------------------------------------------------------------------------------- /gitlab/migrations/migrations-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-migrations 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: migrations 9 | data: 10 | installation_type: | 11 | gitlab-helm-chart 12 | 13 | database.yml.erb: | 14 | production: 15 | adapter: postgresql 16 | encoding: unicode 17 | database: gitlabhq_production 18 | username: gitlab 19 | password: "<%= File.read("/etc/gitlab/postgres/psql-password").strip.dump[1..-2] %>" 20 | host: "gitlab-postgresql" 21 | port: 5432 22 | pool: 1 23 | connect_timeout: 24 | prepared_statements: false 25 | 26 | resque.yml.erb: | 27 | production: 28 | url: redis://:<%= ERB::Util::url_encode(File.read("/etc/gitlab/redis/redis-password").strip) %>@gitlab-redis-master:6379 29 | id: 30 | 31 | cable.yml.erb: | 32 | production: 33 | url: redis://:<%= ERB::Util::url_encode(File.read("/etc/gitlab/redis/redis-password").strip) %>@gitlab-redis-master:6379 34 | id: 35 | adapter: redis 36 | 37 | gitlab.yml.erb: | 38 | production: &base 39 | gitlab: 40 | host: gitlab.lzxlinux.com 41 | gitaly: 42 | client_path: /home/git/gitaly/bin 43 | token: "<%= 
File.read('/etc/gitlab/gitaly/gitaly_token').strip.dump[1..-2] %>"
        repositories:
          storages: # You must have at least a `default` storage path.
            default:
              path: /var/opt/gitlab/repo
              gitaly_address: tcp://gitlab-gitaly-0.gitlab-gitaly.public-service:8075

  configure: |
    set -e
    config_dir="/init-config"
    secret_dir="/init-secrets"

    for secret in rails-secrets migrations gitaly ; do
      mkdir -p "${secret_dir}/${secret}"
      cp -v -r -L "${config_dir}/${secret}/." "${secret_dir}/${secret}/"
    done
    for secret in redis minio objectstorage postgres ldap omniauth smtp kas ; do
      if [ -e "${config_dir}/${secret}" ]; then
        mkdir -p "${secret_dir}/${secret}"
        cp -v -r -L "${config_dir}/${secret}/." "${secret_dir}/${secret}/"
      fi
    done
-------------------------------------------------------------------------------- /harbor/harbor-pv.yaml: --------------------------------------------------------------------------------
# NFS-backed PersistentVolumes for the Harbor components.
# fix: reclaim policy changed from `Recycle` (deprecated; performs a basic
# `rm -rf` scrub of the volume when its claim is released, destroying
# registry images and database data) to `Retain`, which keeps the data and
# leaves cleanup as a deliberate manual step.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-chartmuseum
  labels:
    app: harbor
    component: chartmuseum
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.30.129
    path: /data/harbor/chartmuseum

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-jobservice
  labels:
    app: harbor
    component: jobservice
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.30.129
    path: /data/harbor/jobservice

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-registry
  labels:
    app: harbor
    component: registry
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.30.129
    path: /data/harbor/registry

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-database
  labels:
    app: harbor
    component: database
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.30.129
    path: /data/harbor/database

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-redis
  labels:
    app: harbor
    component: redis
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.30.129
    path: /data/harbor/redis

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-trivy
  labels:
    app: harbor
    component: trivy
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.30.129
    path: /data/harbor/trivy
-------------------------------------------------------------------------------- /weave-scope/manifests/weave-scope-rbac.yaml: --------------------------------------------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  name: weave-scope
  namespace: weave
  labels:
    name: weave-scope

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: weave-scope
  labels:
    name: weave-scope
rules:
- apiGroups:
  - ''
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  - delete
- apiGroups:
  - ''
  resources:
  - pods/log
  - services
  - nodes
  - namespaces
  - persistentvolumes
  - persistentvolumeclaims
  verbs:
  - get
  - list
  - watch
- apiGroups:
  -
apps 41 | resources: 42 | - deployments 43 | - daemonsets 44 | - statefulsets 45 | verbs: 46 | - get 47 | - list 48 | - watch 49 | - apiGroups: 50 | - batch 51 | resources: 52 | - cronjobs 53 | - jobs 54 | verbs: 55 | - get 56 | - list 57 | - watch 58 | - apiGroups: 59 | - extensions 60 | resources: 61 | - deployments 62 | - daemonsets 63 | verbs: 64 | - get 65 | - list 66 | - watch 67 | - apiGroups: 68 | - apps 69 | resources: 70 | - deployments/scale 71 | verbs: 72 | - get 73 | - update 74 | - apiGroups: 75 | - extensions 76 | resources: 77 | - deployments/scale 78 | verbs: 79 | - get 80 | - update 81 | - apiGroups: 82 | - storage.k8s.io 83 | resources: 84 | - storageclasses 85 | verbs: 86 | - get 87 | - list 88 | - watch 89 | - apiGroups: 90 | - volumesnapshot.external-storage.k8s.io 91 | resources: 92 | - volumesnapshots 93 | - volumesnapshotdatas 94 | verbs: 95 | - list 96 | - watch 97 | 98 | --- 99 | apiVersion: rbac.authorization.k8s.io/v1 100 | kind: ClusterRoleBinding 101 | metadata: 102 | name: weave-scope 103 | labels: 104 | name: weave-scope 105 | roleRef: 106 | kind: ClusterRole 107 | name: weave-scope 108 | apiGroup: rbac.authorization.k8s.io 109 | subjects: 110 | - kind: ServiceAccount 111 | name: weave-scope 112 | namespace: weave 113 | -------------------------------------------------------------------------------- /weave-scope/manifests/weave-scope-agent-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: weave-scope-agent 5 | namespace: weave 6 | labels: 7 | name: weave-scope-agent 8 | app: weave-scope 9 | weave-cloud-component: scope 10 | weave-scope-component: agent 11 | spec: 12 | updateStrategy: 13 | type: RollingUpdate 14 | minReadySeconds: 5 15 | selector: 16 | matchLabels: 17 | name: weave-scope-agent 18 | app: weave-scope 19 | weave-cloud-component: scope 20 | weave-scope-component: agent 21 | template: 22 | metadata: 23 | labels: 24 
| name: weave-scope-agent 25 | app: weave-scope 26 | weave-cloud-component: scope 27 | weave-scope-component: agent 28 | spec: 29 | containers: 30 | - name: scope-agent 31 | image: docker.io/weaveworks/scope:1.13.1 32 | imagePullPolicy: IfNotPresent 33 | args: 34 | - '--mode=probe' 35 | - '--probe-only' 36 | - '--probe.kubernetes.role=host' 37 | - '--probe.publish.interval=4500ms' 38 | - '--probe.spy.interval=2s' 39 | - '--probe.docker.bridge=docker0' 40 | - '--probe.docker=true' 41 | - 'weave-scope-app.weave.svc.cluster.local:80' 42 | command: 43 | - /home/weave/scope 44 | env: [] 45 | resources: 46 | limits: 47 | memory: 2000Mi 48 | requests: 49 | cpu: 100m 50 | memory: 100Mi 51 | securityContext: 52 | privileged: true 53 | volumeMounts: 54 | - name: scope-plugins 55 | mountPath: /var/run/scope/plugins 56 | - name: sys-kernel-debug 57 | mountPath: /sys/kernel/debug 58 | - name: docker-socket 59 | mountPath: /var/run/docker.sock 60 | volumes: 61 | - name: scope-plugins 62 | hostPath: 63 | path: /var/run/scope/plugins 64 | - name: sys-kernel-debug 65 | hostPath: 66 | path: /sys/kernel/debug 67 | - name: docker-socket 68 | hostPath: 69 | path: /var/run/docker.sock 70 | dnsPolicy: ClusterFirstWithHostNet 71 | hostNetwork: true 72 | hostPID: true 73 | tolerations: 74 | - effect: NoSchedule 75 | operator: Exists 76 | - effect: NoExecute 77 | operator: Exists 78 | -------------------------------------------------------------------------------- /gitlab/minio/minio-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: gitlab-minio 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: minio 9 | spec: 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: gitlab 15 | component: minio 16 | template: 17 | metadata: 18 | name: gitlab-minio 19 | labels: 20 | app: gitlab 21 | component: minio 22 | spec: 23 | initContainers: 24 | - 
name: configure 25 | image: busybox:latest 26 | imagePullPolicy: IfNotPresent 27 | command: ["sh", "/config/configure"] 28 | resources: 29 | requests: 30 | cpu: 50m 31 | volumeMounts: 32 | - name: minio-configuration 33 | mountPath: /config 34 | - name: minio-server-config 35 | mountPath: /minio 36 | containers: 37 | - name: minio 38 | image: minio/minio:RELEASE.2017-12-28T01-21-00Z 39 | imagePullPolicy: IfNotPresent 40 | args: ["-C", "/tmp/.minio", "--quiet", "server", "/export"] 41 | ports: 42 | - name: service 43 | containerPort: 9000 44 | resources: 45 | requests: 46 | cpu: 100m 47 | memory: 128Mi 48 | livenessProbe: 49 | tcpSocket: 50 | port: 9000 51 | timeoutSeconds: 1 52 | volumeMounts: 53 | - name: export 54 | mountPath: /export 55 | - name: minio-server-config 56 | mountPath: "/tmp/.minio" 57 | - name: podinfo 58 | mountPath: /podinfo 59 | readOnly: false 60 | securityContext: 61 | runAsUser: 0 62 | fsGroup: 0 63 | volumes: 64 | - name: podinfo 65 | downwardAPI: 66 | items: 67 | - path: "labels" 68 | fieldRef: 69 | fieldPath: metadata.labels 70 | - name: export 71 | persistentVolumeClaim: 72 | claimName: gitlab-minio 73 | - name: minio-configuration 74 | projected: 75 | sources: 76 | - configMap: 77 | name: gitlab-minio-config-cm 78 | - secret: 79 | name: "gitlab-minio-secret" 80 | - name: minio-server-config 81 | emptyDir: 82 | medium: "Memory" 83 | -------------------------------------------------------------------------------- /gitlab-runner/gitlab-runner-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-runner 5 | namespace: public-service 6 | labels: 7 | app: gitlab-runner 8 | data: 9 | entrypoint: | 10 | set -e 11 | mkdir -p /home/gitlab-runner/.gitlab-runner/ 12 | cp /scripts/config.toml /home/gitlab-runner/.gitlab-runner/ 13 | 14 | if [[ -f /secrets/accesskey && -f /secrets/secretkey ]]; then 15 | export CACHE_S3_ACCESS_KEY=$(cat 
/secrets/accesskey) 16 | export CACHE_S3_SECRET_KEY=$(cat /secrets/secretkey) 17 | fi 18 | 19 | if [[ -f /secrets/gcs-applicaton-credentials-file ]]; then 20 | export GOOGLE_APPLICATION_CREDENTIALS="/secrets/gcs-applicaton-credentials-file" 21 | elif [[ -f /secrets/gcs-application-credentials-file ]]; then 22 | export GOOGLE_APPLICATION_CREDENTIALS="/secrets/gcs-application-credentials-file" 23 | else 24 | if [[ -f /secrets/gcs-access-id && -f /secrets/gcs-private-key ]]; then 25 | export CACHE_GCS_ACCESS_ID=$(cat /secrets/gcs-access-id) 26 | export CACHE_GCS_PRIVATE_KEY=$(echo -e $(cat /secrets/gcs-private-key)) 27 | fi 28 | fi 29 | 30 | if [[ -f /secrets/runner-registration-token ]]; then 31 | export REGISTRATION_TOKEN=$(cat /secrets/runner-registration-token) 32 | fi 33 | 34 | if [[ -f /secrets/runner-token ]]; then 35 | export CI_SERVER_TOKEN=$(cat /secrets/runner-token) 36 | fi 37 | 38 | if ! sh /scripts/register-the-runner; then 39 | exit 1 40 | fi 41 | 42 | if ! bash /scripts/pre-entrypoint-script; then 43 | exit 1 44 | fi 45 | 46 | exec /entrypoint run --user=gitlab-runner \ 47 | --working-directory=/home/gitlab-runner 48 | 49 | config.toml: | 50 | concurrent = 10 51 | check_interval = 30 52 | log_level = "info" 53 | 54 | configure: | 55 | set -e 56 | cp /init-secrets/* /secrets 57 | 58 | register-the-runner: | 59 | MAX_REGISTER_ATTEMPTS=30 60 | 61 | for i in $(seq 1 "${MAX_REGISTER_ATTEMPTS}"); do 62 | echo "Registration attempt ${i} of ${MAX_REGISTER_ATTEMPTS}" 63 | /entrypoint register \ 64 | --non-interactive 65 | 66 | retval=$? 
67 | 68 | if [ ${retval} = 0 ]; then 69 | break 70 | elif [ ${i} = ${MAX_REGISTER_ATTEMPTS} ]; then 71 | exit 1 72 | fi 73 | 74 | sleep 5 75 | done 76 | 77 | exit 0 78 | 79 | check-live: | 80 | if /usr/bin/pgrep -f .*register-the-runner; then 81 | exit 0 82 | elif /usr/bin/pgrep gitlab.*runner; then 83 | exit 0 84 | else 85 | exit 1 86 | fi 87 | 88 | pre-entrypoint-script: | 89 | -------------------------------------------------------------------------------- /elfk/elasticsearch/elasticsearch-sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: elasticsearch 5 | namespace: public-service 6 | spec: 7 | serviceName: elasticsearch 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: elasticsearch 12 | template: 13 | metadata: 14 | labels: 15 | app: elasticsearch 16 | spec: 17 | terminationGracePeriodSeconds: 30 18 | containers: 19 | - name: elasticsearch 20 | image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.6.2 21 | resources: 22 | limits: 23 | cpu: 1000m 24 | requests: 25 | cpu: 100m 26 | ports: 27 | - containerPort: 9200 28 | name: api 29 | protocol: TCP 30 | - containerPort: 9300 31 | name: discovery 32 | protocol: TCP 33 | env: 34 | - name: "http.host" 35 | value: "0.0.0.0" 36 | - name: "network.host" 37 | value: "_eth0_" 38 | - name: "cluster.name" 39 | value: "es-cluster" 40 | - name: node.name 41 | valueFrom: 42 | fieldRef: 43 | fieldPath: metadata.name 44 | - name: "bootstrap.memory_lock" 45 | value: "false" 46 | - name: "discovery.seed_hosts" 47 | value: "elasticsearch" 48 | - name: "cluster.initial_master_nodes" 49 | value: "elasticsearch-0,elasticsearch-1,elasticsearch-2" 50 | - name: "discovery.seed_resolver.timeout" 51 | value: "10s" 52 | - name: "discovery.zen.minimum_master_nodes" 53 | value: "2" 54 | - name: "ES_JAVA_OPTS" 55 | value: "-Xms512m -Xmx512m" 56 | volumeMounts: 57 | - name: data 58 | mountPath: 
/usr/share/elasticsearch/data 59 | volumes: 60 | - name: data 61 | hostPath: 62 | path: /home/elasticsearch/data #该路径为es数据存储目录,自动创建 63 | initContainers: 64 | - name: fix-permissions 65 | image: busybox 66 | command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"] 67 | securityContext: 68 | privileged: true 69 | volumeMounts: 70 | - name: data 71 | mountPath: /usr/share/elasticsearch/data 72 | - name: increase-vm-max-map 73 | image: busybox 74 | command: ["sysctl", "-w", "vm.max_map_count=262144"] 75 | securityContext: 76 | privileged: true 77 | - name: increase-fd-ulimit 78 | image: busybox 79 | command: ["sh", "-c", "ulimit -n 65536"] 80 | securityContext: 81 | privileged: true 82 | -------------------------------------------------------------------------------- /gitlab/redis/redis-health-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-redis-health 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: redis 9 | data: 10 | ping_readiness_local.sh: |- 11 | password_aux=`cat ${REDIS_PASSWORD_FILE}` 12 | export REDIS_PASSWORD=$password_aux 13 | response=$( 14 | timeout -s 9 $1 \ 15 | redis-cli \ 16 | -a $REDIS_PASSWORD --no-auth-warning \ 17 | -h localhost \ 18 | -p $REDIS_PORT \ 19 | ping 20 | ) 21 | if [ "$response" != "PONG" ]; then 22 | echo "$response" 23 | exit 1 24 | fi 25 | 26 | ping_liveness_local.sh: |- 27 | password_aux=`cat ${REDIS_PASSWORD_FILE}` 28 | export REDIS_PASSWORD=$password_aux 29 | response=$( 30 | timeout -s 9 $1 \ 31 | redis-cli \ 32 | -a $REDIS_PASSWORD --no-auth-warning \ 33 | -h localhost \ 34 | -p $REDIS_PORT \ 35 | ping 36 | ) 37 | if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then 38 | echo "$response" 39 | exit 1 40 | fi 41 | 42 | ping_readiness_master.sh: |- 43 | password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` 44 | export 
REDIS_MASTER_PASSWORD=$password_aux 45 | response=$( 46 | timeout -s 9 $1 \ 47 | redis-cli \ 48 | -a $REDIS_MASTER_PASSWORD --no-auth-warning \ 49 | -h $REDIS_MASTER_HOST \ 50 | -p $REDIS_MASTER_PORT_NUMBER \ 51 | ping 52 | ) 53 | if [ "$response" != "PONG" ]; then 54 | echo "$response" 55 | exit 1 56 | fi 57 | 58 | ping_liveness_master.sh: |- 59 | password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` 60 | export REDIS_MASTER_PASSWORD=$password_aux 61 | response=$( 62 | timeout -s 9 $1 \ 63 | redis-cli \ 64 | -a $REDIS_MASTER_PASSWORD --no-auth-warning \ 65 | -h $REDIS_MASTER_HOST \ 66 | -p $REDIS_MASTER_PORT_NUMBER \ 67 | ping 68 | ) 69 | if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then 70 | echo "$response" 71 | exit 1 72 | fi 73 | 74 | ping_readiness_local_and_master.sh: |- 75 | script_dir="$(dirname "$0")" 76 | exit_status=0 77 | "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? 78 | "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? 79 | exit $exit_status 80 | 81 | ping_liveness_local_and_master.sh: |- 82 | script_dir="$(dirname "$0")" 83 | exit_status=0 84 | "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? 85 | "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? 
86 | exit $exit_status 87 | -------------------------------------------------------------------------------- /gitlab/webservice/workhorse-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-workhorse-config 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: webservice 9 | data: 10 | installation_type: | 11 | gitlab-helm-chart 12 | 13 | workhorse-config.toml.erb: | 14 | [redis] 15 | URL = "redis://gitlab-redis-master:6379" 16 | Password = "<%= File.read("/etc/gitlab/redis/redis-password").strip.dump[1..-2] %>" 17 | <% 18 | require 'yaml' 19 | 20 | supported_providers = %w(AWS AzureRM) 21 | provider = '' 22 | aws_access_key_id = '' 23 | aws_secret_access_key = '' 24 | 25 | azure_storage_account_name = '' 26 | azure_storage_access_key = '' 27 | 28 | if File.exists? '/etc/gitlab/minio/accesskey' 29 | provider = 'AWS' 30 | aws_access_key_id = File.read('/etc/gitlab/minio/accesskey').strip.dump[1..-2] 31 | aws_secret_access_key = File.read('/etc/gitlab/minio/secretkey').strip.dump[1..-2] 32 | end 33 | 34 | if File.exists? '/etc/gitlab/objectstorage/object_store' 35 | connection = YAML.safe_load(File.read('/etc/gitlab/objectstorage/object_store')) 36 | provider = connection['provider'] 37 | if connection.has_key? 'aws_access_key_id' 38 | aws_access_key_id = connection['aws_access_key_id'] 39 | aws_secret_access_key = connection['aws_secret_access_key'] 40 | elsif connection.has_key? 'azure_storage_account_name' 41 | azure_storage_account_name = connection['azure_storage_account_name'] 42 | azure_storage_access_key = connection['azure_storage_access_key'] 43 | end 44 | end 45 | 46 | if supported_providers.include? provider 47 | %> 48 | [object_storage] 49 | provider = "<%= provider %>" 50 | <% if provider.eql? 
'AWS' %> 51 | 52 | [object_storage.s3] 53 | 54 | aws_access_key_id = "<%= aws_access_key_id %>" 55 | aws_secret_access_key = "<%= aws_secret_access_key %>" 56 | <% elsif provider.eql? 'AzureRM' %> 57 | 58 | [object_storage.azurerm] 59 | azure_storage_account_name = "<%= azure_storage_account_name %>" 60 | azure_storage_access_key = "<%= azure_storage_access_key %>" 61 | <% 62 | end 63 | end 64 | %> 65 | 66 | configure: | 67 | set -e 68 | mkdir -p /init-secrets-workhorse/gitlab-workhorse 69 | cp -v -r -L /init-config/gitlab-workhorse/secret /init-secrets-workhorse/gitlab-workhorse/secret 70 | mkdir -p /init-secrets-workhorse/redis 71 | cp -v -r -L /init-config/redis/redis-password /init-secrets-workhorse/redis/ 72 | if [ -d /init-config/minio ]; then 73 | mkdir -p /init-secrets-workhorse/minio 74 | cp -v -r -L /init-config/minio/* /init-secrets-workhorse/minio/ 75 | fi 76 | 77 | -------------------------------------------------------------------------------- /harbor/harbor/templates/notary/notary-signer.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.notary.enabled }} 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: {{ template "harbor.notary-signer" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | component: notary-signer 9 | spec: 10 | replicas: {{ .Values.notary.signer.replicas }} 11 | selector: 12 | matchLabels: 13 | {{ include "harbor.matchLabels" . | indent 6 }} 14 | component: notary-signer 15 | template: 16 | metadata: 17 | labels: 18 | {{ include "harbor.labels" . | indent 8 }} 19 | component: notary-signer 20 | annotations: 21 | checksum/secret: {{ include (print $.Template.BasePath "/notary/notary-secret.yaml") . 
| sha256sum }} 22 | spec: 23 | securityContext: 24 | fsGroup: 10000 25 | {{- if .Values.notary.signer.serviceAccountName }} 26 | serviceAccountName: {{ .Values.notary.signer.serviceAccountName }} 27 | {{- end -}} 28 | {{- with .Values.imagePullSecrets }} 29 | imagePullSecrets: 30 | {{- toYaml . | nindent 8 }} 31 | {{- end }} 32 | containers: 33 | - name: notary-signer 34 | image: {{ .Values.notary.signer.image.repository }}:{{ .Values.notary.signer.image.tag }} 35 | imagePullPolicy: {{ .Values.imagePullPolicy }} 36 | {{- if .Values.notary.signer.resources }} 37 | resources: 38 | {{ toYaml .Values.notary.signer.resources | indent 10 }} 39 | {{- end }} 40 | env: 41 | - name: MIGRATIONS_PATH 42 | value: migrations/signer/postgresql 43 | - name: DB_URL 44 | value: {{ template "harbor.database.notarySigner" . }} 45 | - name: NOTARY_SIGNER_DEFAULTALIAS 46 | value: defaultalias 47 | volumeMounts: 48 | - name: config 49 | mountPath: /etc/notary/signer-config.postgres.json 50 | subPath: signer.json 51 | - name: signer-certificate 52 | mountPath: /etc/ssl/notary/tls.crt 53 | subPath: tls.crt 54 | - name: signer-certificate 55 | mountPath: /etc/ssl/notary/tls.key 56 | subPath: tls.key 57 | volumes: 58 | - name: config 59 | secret: 60 | secretName: "{{ template "harbor.notary-server" . }}" 61 | - name: signer-certificate 62 | secret: 63 | {{- if .Values.notary.secretName }} 64 | secretName: {{ .Values.notary.secretName }} 65 | {{- else }} 66 | secretName: {{ template "harbor.notary-server" . }} 67 | {{- end }} 68 | {{- with .Values.notary.nodeSelector }} 69 | nodeSelector: 70 | {{ toYaml . | indent 8 }} 71 | {{- end }} 72 | {{- with .Values.notary.affinity }} 73 | affinity: 74 | {{ toYaml . | indent 8 }} 75 | {{- end }} 76 | {{- with .Values.notary.tolerations }} 77 | tolerations: 78 | {{ toYaml . 
| indent 8 }} 79 | {{- end }} 80 | {{ end }} 81 | -------------------------------------------------------------------------------- /prometheus/kube-state-metrics/kube-state-metrics-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: public-service 6 | labels: 7 | app: kube-state-metrics 8 | 9 | --- 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRole 12 | metadata: 13 | name: kube-state-metrics 14 | labels: 15 | app: kube-state-metrics 16 | rules: 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - configmaps 21 | - secrets 22 | - nodes 23 | - pods 24 | - services 25 | - resourcequotas 26 | - replicationcontrollers 27 | - limitranges 28 | - persistentvolumeclaims 29 | - persistentvolumes 30 | - namespaces 31 | - endpoints 32 | verbs: 33 | - list 34 | - watch 35 | - apiGroups: 36 | - extensions 37 | resources: 38 | - daemonsets 39 | - deployments 40 | - replicasets 41 | - ingresses 42 | verbs: 43 | - list 44 | - watch 45 | - apiGroups: 46 | - apps 47 | resources: 48 | - statefulsets 49 | - daemonsets 50 | - deployments 51 | - replicasets 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - batch 57 | resources: 58 | - cronjobs 59 | - jobs 60 | verbs: 61 | - list 62 | - watch 63 | - apiGroups: 64 | - autoscaling 65 | resources: 66 | - horizontalpodautoscalers 67 | verbs: 68 | - list 69 | - watch 70 | - apiGroups: 71 | - authentication.k8s.io 72 | resources: 73 | - tokenreviews 74 | verbs: 75 | - create 76 | - apiGroups: 77 | - authorization.k8s.io 78 | resources: 79 | - subjectaccessreviews 80 | verbs: 81 | - create 82 | - apiGroups: 83 | - policy 84 | resources: 85 | - poddisruptionbudgets 86 | verbs: 87 | - list 88 | - watch 89 | - apiGroups: 90 | - certificates.k8s.io 91 | resources: 92 | - certificatesigningrequests 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - storage.k8s.io 98 | 
resources: 99 | - storageclasses 100 | - volumeattachments 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - admissionregistration.k8s.io 106 | resources: 107 | - mutatingwebhookconfigurations 108 | - validatingwebhookconfigurations 109 | verbs: 110 | - list 111 | - watch 112 | - apiGroups: 113 | - networking.k8s.io 114 | resources: 115 | - networkpolicies 116 | verbs: 117 | - list 118 | - watch 119 | - apiGroups: 120 | - coordination.k8s.io 121 | resources: 122 | - leases 123 | verbs: 124 | - list 125 | - watch 126 | 127 | --- 128 | apiVersion: rbac.authorization.k8s.io/v1 129 | kind: ClusterRoleBinding 130 | metadata: 131 | name: kube-state-metrics 132 | labels: 133 | app: kube-state-metrics 134 | roleRef: 135 | apiGroup: rbac.authorization.k8s.io 136 | kind: ClusterRole 137 | name: kube-state-metrics 138 | subjects: 139 | - kind: ServiceAccount 140 | name: kube-state-metrics 141 | namespace: public-service 142 | -------------------------------------------------------------------------------- /zabbix/zabbix-server/zabbix-server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zabbix-server 5 | namespace: monitoring 6 | labels: 7 | app: zabbix-server 8 | spec: 9 | selector: 10 | app: zabbix-server 11 | ports: 12 | - name: zabbix-server 13 | port: 10051 14 | nodePort: 30051 15 | type: NodePort 16 | 17 | --- 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: zabbix-server 22 | namespace: monitoring 23 | spec: 24 | replicas: 1 25 | selector: 26 | matchLabels: 27 | app: zabbix-server 28 | template: 29 | metadata: 30 | labels: 31 | app: zabbix-server 32 | spec: 33 | containers: 34 | - name: zabbix-server 35 | image: zabbix/zabbix-server-mysql:latest 36 | imagePullPolicy: IfNotPresent 37 | env: 38 | - name: DB_SERVER_HOST 39 | value: mysql 40 | - name: DB_SERVER_PORT 41 | value: "3306" 42 | - name: MYSQL_DATABASE 43 | value: zabbix 44 
| - name: MYSQL_USER 45 | value: zabbix 46 | - name: MYSQL_PASSWORD 47 | value: zabbix 48 | - name: MYSQL_ROOT_PASSWORD 49 | value: zabbix 50 | - name: ZBX_CACHESIZE 51 | value: "512M" 52 | - name: ZBX_HISTORYCACHESIZE 53 | value: "128M" 54 | - name: ZBX_HISTORYINDEXCACHESIZE 55 | value: "128M" 56 | - name: ZBX_TRENDCACHESIZE 57 | value: "128M" 58 | - name: ZBX_VALUECACHESIZE 59 | value: "256M" 60 | - name: ZBX_TIMEOUT 61 | value: "30" 62 | ports: 63 | - containerPort: 10051 64 | name: zabbix-server 65 | protocol: TCP 66 | resources: 67 | requests: 68 | cpu: 1000m 69 | memory: 1000Mi 70 | limits: 71 | cpu: 1000m 72 | memory: 1000Mi 73 | - name: zabbix-agent 74 | image: zabbix/zabbix-agent:latest 75 | imagePullPolicy: IfNotPresent 76 | env: 77 | - name: ZBX_HOSTNAME 78 | value: "Zabbix server" 79 | - name: ZBX_SERVER_HOST 80 | value: "127.0.0.1" 81 | - name: ZBX_STARTAGENTS 82 | value: "3" 83 | - name: ZBX_UNSAFEUSERPARAMETERS 84 | value: "1" 85 | - name: ZBX_TIMEOUT 86 | value: "10" 87 | ports: 88 | - containerPort: 10050 89 | name: zabbix-agent 90 | protocol: TCP 91 | resources: 92 | requests: 93 | cpu: 200m 94 | memory: 200Mi 95 | limits: 96 | cpu: 200m 97 | memory: 200Mi 98 | nodeSelector: #固定zabbix server ip 99 | node-role.kubernetes.io/master: "" 100 | tolerations: 101 | - key: node-role.kubernetes.io/master 102 | operator: Exists 103 | effect: NoSchedule 104 | -------------------------------------------------------------------------------- /harbor/harbor/templates/core/core-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "harbor.core" . }} 5 | labels: 6 | {{ include "harbor.labels" . 
| indent 4 }} 7 | data: 8 | app.conf: |+ 9 | appname = Harbor 10 | runmode = prod 11 | enablegzip = true 12 | 13 | [prod] 14 | httpport = {{ ternary "8443" "8080" .Values.internalTLS.enabled }} 15 | PORT: "{{ ternary "8443" "8080" .Values.internalTLS.enabled }}" 16 | DATABASE_TYPE: "postgresql" 17 | POSTGRESQL_HOST: "{{ template "harbor.database.host" . }}" 18 | POSTGRESQL_PORT: "{{ template "harbor.database.port" . }}" 19 | POSTGRESQL_USERNAME: "{{ template "harbor.database.username" . }}" 20 | POSTGRESQL_DATABASE: "{{ template "harbor.database.coreDatabase" . }}" 21 | POSTGRESQL_SSLMODE: "{{ template "harbor.database.sslmode" . }}" 22 | POSTGRESQL_MAX_IDLE_CONNS: "{{ .Values.database.maxIdleConns }}" 23 | POSTGRESQL_MAX_OPEN_CONNS: "{{ .Values.database.maxOpenConns }}" 24 | EXT_ENDPOINT: "{{ .Values.externalURL }}" 25 | CORE_URL: "{{ template "harbor.coreURL" . }}" 26 | JOBSERVICE_URL: "{{ template "harbor.jobserviceURL" . }}" 27 | REGISTRY_URL: "{{ template "harbor.registryURL" . }}" 28 | TOKEN_SERVICE_URL: "{{ template "harbor.tokenServiceURL" . }}" 29 | WITH_NOTARY: "{{ .Values.notary.enabled }}" 30 | NOTARY_URL: "http://{{ template "harbor.notary-server" . }}:4443" 31 | CORE_LOCAL_URL: "{{ ternary "https://127.0.0.1:8443" "http://127.0.0.1:8080" .Values.internalTLS.enabled }}" 32 | WITH_CLAIR: "{{ .Values.clair.enabled }}" 33 | CLAIR_ADAPTER_URL: "{{ template "harbor.clairAdapterURL" . }}" 34 | WITH_TRIVY: {{ .Values.trivy.enabled | quote }} 35 | TRIVY_ADAPTER_URL: "{{ template "harbor.trivyAdapterURL" . }}" 36 | REGISTRY_STORAGE_PROVIDER_NAME: "{{ .Values.persistence.imageChartStorage.type }}" 37 | WITH_CHARTMUSEUM: "{{ .Values.chartmuseum.enabled }}" 38 | CHART_REPOSITORY_URL: "{{ template "harbor.component.scheme" . }}://{{ template "harbor.chartmuseum" . }}" 39 | LOG_LEVEL: "{{ .Values.logLevel }}" 40 | CONFIG_PATH: "/etc/core/app.conf" 41 | CHART_CACHE_DRIVER: "redis" 42 | _REDIS_URL_CORE: "{{ template "harbor.redis.urlForCore" . 
}}" 43 | _REDIS_URL_REG: "{{ template "harbor.redis.urlForRegistry" . }}" 44 | PORTAL_URL: "{{ template "harbor.portalURL" . }}" 45 | REGISTRY_CONTROLLER_URL: "{{ template "harbor.registryControllerURL" . }}" 46 | REGISTRY_CREDENTIAL_USERNAME: "{{ .Values.registry.credentials.username }}" 47 | {{- if .Values.uaaSecretName }} 48 | UAA_CA_ROOT: "/etc/core/auth-ca/auth-ca.crt" 49 | {{- end }} 50 | {{- if has "core" .Values.proxy.components }} 51 | HTTP_PROXY: "{{ .Values.proxy.httpProxy }}" 52 | HTTPS_PROXY: "{{ .Values.proxy.httpsProxy }}" 53 | NO_PROXY: "{{ template "harbor.noProxy" . }}" 54 | {{- end }} 55 | PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor" 56 | {{- if hasKey .Values.core "gcTimeWindowHours" }} 57 | #make the GC time window configurable for testing 58 | GC_TIME_WINDOW_HOURS: "{{ .Values.core.gcTimeWindowHours }}" 59 | {{- end }} -------------------------------------------------------------------------------- /harbor/harbor/templates/nginx/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (eq .Values.expose.type "clusterIP") (eq .Values.expose.type "nodePort") (eq .Values.expose.type "loadBalancer") }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | {{- if eq .Values.expose.type "clusterIP" }} 6 | {{- $clusterIP := .Values.expose.clusterIP }} 7 | name: {{ $clusterIP.name }} 8 | labels: 9 | {{ include "harbor.labels" . 
| indent 4 }} 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - name: http 14 | port: {{ $clusterIP.ports.httpPort }} 15 | targetPort: 8080 16 | {{- if .Values.expose.tls.enabled }} 17 | - name: https 18 | port: {{ $clusterIP.ports.httpsPort }} 19 | targetPort: 8443 20 | {{- end }} 21 | {{- if .Values.notary.enabled }} 22 | - name: notary 23 | port: {{ $clusterIP.ports.notaryPort }} 24 | targetPort: 4443 25 | {{- end }} 26 | {{- else if eq .Values.expose.type "nodePort" }} 27 | {{- $nodePort := .Values.expose.nodePort }} 28 | name: {{ $nodePort.name }} 29 | labels: 30 | {{ include "harbor.labels" . | indent 4 }} 31 | spec: 32 | type: NodePort 33 | ports: 34 | - name: http 35 | port: {{ $nodePort.ports.http.port }} 36 | targetPort: 8080 37 | {{- if $nodePort.ports.http.nodePort }} 38 | nodePort: {{ $nodePort.ports.http.nodePort }} 39 | {{- end }} 40 | {{- if .Values.expose.tls.enabled }} 41 | - name: https 42 | port: {{ $nodePort.ports.https.port }} 43 | targetPort: 8443 44 | {{- if $nodePort.ports.https.nodePort }} 45 | nodePort: {{ $nodePort.ports.https.nodePort }} 46 | {{- end }} 47 | {{- end }} 48 | {{- if .Values.notary.enabled }} 49 | - name: notary 50 | port: {{ $nodePort.ports.notary.port }} 51 | targetPort: 4443 52 | {{- if $nodePort.ports.notary.nodePort }} 53 | nodePort: {{ $nodePort.ports.notary.nodePort }} 54 | {{- end }} 55 | {{- end }} 56 | {{- else if eq .Values.expose.type "loadBalancer" }} 57 | {{- $loadBalancer := .Values.expose.loadBalancer }} 58 | name: {{ $loadBalancer.name }} 59 | labels: 60 | {{ include "harbor.labels" . | indent 4 }} 61 | {{- with $loadBalancer.annotations }} 62 | annotations: 63 | {{- toYaml . | nindent 4 }} 64 | {{- end }} 65 | spec: 66 | type: LoadBalancer 67 | {{- with $loadBalancer.sourceRanges }} 68 | loadBalancerSourceRanges: 69 | {{- toYaml . 
| nindent 4 }} 70 | {{- end }} 71 | {{- if $loadBalancer.IP }} 72 | loadBalancerIP: {{ $loadBalancer.IP }} 73 | {{- end }} 74 | ports: 75 | - name: http 76 | port: {{ $loadBalancer.ports.httpPort }} 77 | targetPort: 8080 78 | {{- if .Values.expose.tls.enabled }} 79 | - name: https 80 | port: {{ $loadBalancer.ports.httpsPort }} 81 | targetPort: 8443 82 | {{- end }} 83 | {{- if .Values.notary.enabled }} 84 | - name: notary 85 | port: {{ $loadBalancer.ports.notaryPort }} 86 | targetPort: 4443 87 | {{- end }} 88 | {{- end }} 89 | selector: 90 | {{ include "harbor.matchLabels" . | indent 4 }} 91 | component: nginx 92 | {{- end }} 93 | -------------------------------------------------------------------------------- /consul/client/consul-client-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: consul 5 | namespace: public-service 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: consul 10 | component: client 11 | template: 12 | metadata: 13 | labels: 14 | app: consul 15 | component: client 16 | spec: 17 | affinity: 18 | podAntiAffinity: 19 | requiredDuringSchedulingIgnoredDuringExecution: 20 | - labelSelector: 21 | matchExpressions: 22 | - key: "component" # fixed typo "componment": must match the pod label "component: client" or the anti-affinity selector matches nothing and one-client-per-node is not enforced 23 | operator: In 24 | values: 25 | - client 26 | topologyKey: "kubernetes.io/hostname" 27 | terminationGracePeriodSeconds: 10 28 | containers: 29 | - name: consul 30 | image: consul:latest 31 | imagePullPolicy: IfNotPresent 32 | ports: 33 | - containerPort: 8500 34 | name: http 35 | - containerPort: 8600 36 | name: dns-tcp 37 | protocol: TCP 38 | - containerPort: 8600 39 | name: dns-udp 40 | protocol: UDP 41 | - containerPort: 8301 42 | name: serflan 43 | - containerPort: 8302 44 | name: serfwan 45 | - containerPort: 8300 46 | name: server 47 | env: 48 | - name: POD_IP 49 | valueFrom: 50 | fieldRef: 51 | fieldPath: status.podIP 52 | - name: NAMESPACE 53 | valueFrom: 54 | fieldRef: 55 | fieldPath: 
metadata.namespace 56 | args: 57 | - "agent" 58 | - "-advertise=$(POD_IP)" 59 | - "-bind=0.0.0.0" 60 | - "-datacenter=dc1" 61 | - "-config-dir=/consul/userconfig" 62 | - "-data-dir=/consul/data" 63 | - "-disable-host-node-id=true" 64 | - "-domain=cluster.local" 65 | - "-retry-join=consul-server-0.consul-server.$(NAMESPACE).svc.cluster.local" 66 | - "-client=0.0.0.0" 67 | resources: 68 | limits: 69 | cpu: "50m" 70 | memory: "32Mi" 71 | requests: 72 | cpu: "50m" 73 | memory: "32Mi" 74 | lifecycle: 75 | preStop: 76 | exec: 77 | command: 78 | - /bin/sh 79 | - -c 80 | - consul leave 81 | volumeMounts: 82 | - name: data 83 | mountPath: /consul/data 84 | - name: user-config 85 | mountPath: /consul/userconfig 86 | volumes: 87 | - name: user-config 88 | configMap: 89 | name: consul-client-config 90 | - name: data 91 | emptyDir: {} 92 | securityContext: 93 | fsGroup: 1000 94 | 95 | # volumeClaimTemplates: 96 | # - metadata: 97 | # name: data 98 | # spec: 99 | # accessModes: 100 | # - ReadWriteMany 101 | # storageClassName: "gluster-heketi-2" 102 | # resources: 103 | # requests: 104 | # storage: 10Gi 105 | -------------------------------------------------------------------------------- /consul/README.md: -------------------------------------------------------------------------------- 1 | ### consul 2 | 3 | - PodDisruptionBudget: 4 | 5 | k8s可以为每个应用程序创建 `PodDisruptionBudget` 对象(PDB)。PDB 将限制在同一时间因资源干扰导致的复制应用程序中宕机的 pod 数量。 6 | 7 | 可以通过两个参数来配置PodDisruptionBudget: 8 | 9 | ```a 10 | MinAvailable:表示最小可用Pod数,表示应用Pod集群处于运行状态的最小Pod数量,或者是运行状态的Pod数同总Pod数的最小百分比 11 | 12 | MaxUnavailable:表示最大不可用Pod数,表示应用Pod集群处于不可用状态的最大Pod数,或者是不可用状态的Pod数同总Pod数的最大百分比 13 | ``` 14 | 15 | 需要注意的是,`MinAvailable`参数和`MaxUnavailable`参数只能同时配置一个。 16 | 17 | - 部署: 18 | 19 | ```bash 20 | kubectl apply -f public-service-ns.yaml 21 | 22 | kubectl apply -f server/ 23 | 24 | kubectl get svc -n public-service 25 | 26 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 27 | consul-dns ClusterIP 10.110.235.63 53/TCP,53/UDP 85s 28 | 
consul-server ClusterIP None 8500/TCP,8600/TCP,8600/UDP,8301/TCP,8301/UDP,8302/TCP,8302/UDP,8300/TCP 85s 29 | consul-ui ClusterIP 10.98.220.223 80/TCP 85s 30 | 31 | 32 | kubectl get pod -n public-service 33 | 34 | NAME READY STATUS RESTARTS AGE 35 | consul-server-0 1/1 Running 0 110s 36 | consul-server-1 1/1 Running 0 107s 37 | consul-server-2 1/1 Running 0 92s 38 | ``` 39 | 40 | - 查看集群状态: 41 | 42 | ```bash 43 | kubectl exec -n public-service consul-server-0 -- consul members 44 | 45 | Node Address Status Type Build Protocol DC Segment 46 | consul-server-0 172.10.135.17:8301 alive server 1.8.3 2 dc1 47 | consul-server-1 172.10.104.11:8301 alive server 1.8.3 2 dc1 48 | consul-server-2 172.10.166.136:8301 alive server 1.8.3 2 dc1 49 | ``` 50 | 51 | - 访问ui: 52 | 53 | 添加hosts:`consul.lzxlinux.com`,访问`consul.lzxlinux.com/ui`。 54 | 55 | - 加入client: 56 | 57 | ```bash 58 | kubectl apply -f client/ 59 | 60 | kubectl get pod -n public-service 61 | 62 | NAME READY STATUS RESTARTS AGE 63 | consul-8wx22 1/1 Running 0 40s 64 | consul-glmgs 1/1 Running 0 10s 65 | consul-server-0 1/1 Running 0 30m 66 | consul-server-1 1/1 Running 0 30m 67 | consul-server-2 1/1 Running 0 30m 68 | consul-vxbj7 1/1 Running 0 61s 69 | ``` 70 | 71 | ```bash 72 | kubectl exec -n public-service consul-server-0 -- consul members 73 | 74 | Node Address Status Type Build Protocol DC Segment 75 | consul-server-0 172.10.135.17:8301 alive server 1.8.3 2 dc1 76 | consul-server-1 172.10.104.11:8301 alive server 1.8.3 2 dc1 77 | consul-server-2 172.10.166.136:8301 alive server 1.8.3 2 dc1 78 | consul-8wx22 172.10.166.138:8301 alive client 1.8.3 2 dc1 79 | consul-glmgs 172.10.135.19:8301 alive client 1.8.3 2 dc1 80 | consul-vxbj7 172.10.104.13:8301 alive client 1.8.3 2 dc1 81 | ``` 82 | 83 | --- 84 | -------------------------------------------------------------------------------- /prometheus/prometheus/rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 
| kind: ConfigMap 3 | metadata: 4 | name: prometheus-rules 5 | namespace: public-service 6 | data: 7 | node.rules: | 8 | groups: 9 | - name: node 10 | rules: 11 | - alert: NodeDown 12 | expr: up == 0 13 | for: 3m 14 | labels: 15 | severity: critical 16 | annotations: 17 | summary: "{{ $labels.instance }}: down" 18 | description: "{{ $labels.instance }} has been down for more than 3m" 19 | value: "{{ $value }}" 20 | 21 | - alert: NodeCPUHigh 22 | expr: (1 - avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m]))) * 100 > 75 23 | for: 5m 24 | labels: 25 | severity: warning 26 | annotations: 27 | summary: "{{$labels.instance}}: High CPU usage" 28 | description: "{{$labels.instance}}: CPU usage is above 75%" 29 | value: "{{ $value }}" 30 | 31 | - alert: NodeCPUIowaitHigh 32 | expr: avg by (instance) (irate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 50 33 | for: 5m 34 | labels: 35 | severity: warning 36 | annotations: 37 | summary: "{{$labels.instance}}: High CPU iowait usage" 38 | description: "{{$labels.instance}}: CPU iowait usage is above 50%" 39 | value: "{{ $value }}" 40 | 41 | - alert: NodeMemoryUsageHigh 42 | expr: (1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 > 90 43 | for: 5m 44 | labels: 45 | severity: warning 46 | annotations: 47 | summary: "{{$labels.instance}}: High memory usage" 48 | description: "{{$labels.instance}}: Memory usage is above 90%" 49 | value: "{{ $value }}" 50 | 51 | - alert: NodeDiskRootLow 52 | expr: (1 - node_filesystem_avail_bytes{fstype=~"ext.*|xfs",mountpoint ="/"} / node_filesystem_size_bytes{fstype=~"ext.*|xfs",mountpoint ="/"}) * 100 > 80 53 | for: 10m 54 | labels: 55 | severity: warning 56 | annotations: 57 | summary: "{{$labels.instance}}: Low disk(the / partition) space" 58 | description: "{{$labels.instance}}: Disk(the / partition) usage is above 80%" 59 | value: "{{ $value }}" 60 | 61 | - alert: NodeDiskBootLow 62 | expr: (1 - 
node_filesystem_avail_bytes{fstype=~"ext.*|xfs",mountpoint ="/boot"} / node_filesystem_size_bytes{fstype=~"ext.*|xfs",mountpoint ="/boot"}) * 100 > 80 63 | for: 10m 64 | labels: 65 | severity: warning 66 | annotations: 67 | summary: "{{$labels.instance}}: Low disk(the /boot partition) space" 68 | description: "{{$labels.instance}}: Disk(the /boot partition) usage is above 80%" 69 | value: "{{ $value }}" 70 | 71 | - alert: NodeLoad5High 72 | expr: (node_load5) > (count by (instance) (node_cpu_seconds_total{mode='system'}) * 2) 73 | for: 5m 74 | labels: 75 | severity: warning 76 | annotations: 77 | summary: "{{$labels.instance}}: Load(5m) High" 78 | description: "{{$labels.instance}}: Load(5m) is 2 times the number of CPU cores" 79 | value: "{{ $value }}" 80 | -------------------------------------------------------------------------------- /gitlab/registry/registry-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: gitlab-registry 5 | namespace: public-service 6 | labels: 7 | app: gitlab 8 | component: registry 9 | data: 10 | configure: |- 11 | if [ -e /config/accesskey ] ; then 12 | sed -e 's@ACCESS_KEY@'"$(cat /config/accesskey)"'@' -e 's@SECRET_KEY@'"$(cat /config/secretkey)"'@' /config/config.yml > /registry/config.yml 13 | else 14 | cp -v -r -L /config/config.yml /registry/config.yml 15 | fi 16 | 17 | sed -i -e 's@HTTP_SECRET@'"$(cat /config/httpSecret)"'@' /registry/config.yml 18 | 19 | if [ -d /config/notifications ]; then 20 | for i in /config/notifications/*; do 21 | filename=$(basename $i); 22 | sed -i -e 's@'"${filename}"'@'"$(cat $i)"'@' /registry/config.yml; 23 | done 24 | fi 25 | 26 | if [ -d /config/storage ]; then 27 | 28 | mkdir -p /registry/storage 29 | cp -v -r -L /config/storage/* /registry/storage/ 30 | 31 | echo '' >> /registry/storage/config 32 | 33 | if ! 
$(egrep -A1 '^delete:\s*$' /registry/storage/config | egrep -q '\s{2,4}enabled:') ; then 34 | echo 'delete:' >> /registry/storage/config 35 | echo ' enabled: true' >> /registry/storage/config 36 | fi 37 | 38 | sed -i 's/^/ /' /registry/storage/config 39 | 40 | sed -i '/storage:/ r /registry/storage/config' /registry/config.yml 41 | 42 | rm /registry/storage/config 43 | fi 44 | 45 | cat /config/certificate.crt > /registry/certificate.crt 46 | 47 | if [ -f /config/profiling-key.json ]; then 48 | cp /config/profiling-key.json /registry/profiling-key.json 49 | fi 50 | 51 | config.yml: | 52 | version: 0.1 53 | log: 54 | fields: 55 | service: registry 56 | level: warn 57 | http: 58 | debug: 59 | addr: ':5001' 60 | prometheus: 61 | enabled: false 62 | path: /metrics 63 | draintimeout: 0 64 | headers: 65 | X-Content-Type-Options: [nosniff] 66 | addr: :5000 67 | secret: "HTTP_SECRET" 68 | relativeurls: false 69 | health: 70 | storagedriver: 71 | enabled: false 72 | interval: 10s 73 | threshold: 3 74 | auth: 75 | token: 76 | realm: http://gitlab.lzxlinux.com/jwt/auth 77 | service: container_registry 78 | issuer: "gitlab-issuer" 79 | rootcertbundle: /etc/docker/registry/certificate.crt 80 | autoredirect: false 81 | compatibility: 82 | schema1: 83 | enabled: false 84 | validation: 85 | disabled: true 86 | 87 | profiling: 88 | storage: 89 | maintenance: 90 | readonly: 91 | enabled: false 92 | s3: 93 | accesskey: "ACCESS_KEY" 94 | secretkey: "SECRET_KEY" 95 | region: us-east-1 96 | regionendpoint: http://gitlab-minio-svc:9000 97 | bucket: registry 98 | secure: true 99 | v4auth: true 100 | rootdirectory: / 101 | cache: 102 | blobdescriptor: 'inmemory' 103 | delete: 104 | enabled: true 105 | redirect: 106 | disable: true 107 | -------------------------------------------------------------------------------- /harbor/harbor/templates/notary/notary-server.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.notary.enabled }} 2 | 
apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: {{ template "harbor.notary-server" . }} 6 | labels: 7 | {{ include "harbor.labels" . | indent 4 }} 8 | component: notary-server 9 | spec: 10 | replicas: {{ .Values.notary.server.replicas }} 11 | selector: 12 | matchLabels: 13 | {{ include "harbor.matchLabels" . | indent 6 }} 14 | component: notary-server 15 | template: 16 | metadata: 17 | labels: 18 | {{ include "harbor.labels" . | indent 8 }} 19 | component: notary-server 20 | annotations: 21 | checksum/secret: {{ include (print $.Template.BasePath "/notary/notary-secret.yaml") . | sha256sum }} 22 | checksum/secret-core: {{ include (print $.Template.BasePath "/core/core-secret.yaml") . | sha256sum }} 23 | {{- if .Values.notary.podAnnotations }} 24 | {{ toYaml .Values.notary.podAnnotations | indent 8 }} 25 | {{- end }} 26 | spec: 27 | securityContext: 28 | fsGroup: 10000 29 | {{- if .Values.notary.server.serviceAccountName }} 30 | serviceAccountName: {{ .Values.notary.server.serviceAccountName }} 31 | {{- end -}} 32 | {{- with .Values.imagePullSecrets }} 33 | imagePullSecrets: 34 | {{- toYaml . | nindent 8 }} 35 | {{- end }} 36 | containers: 37 | - name: notary-server 38 | image: {{ .Values.notary.server.image.repository }}:{{ .Values.notary.server.image.tag }} 39 | imagePullPolicy: {{ .Values.imagePullPolicy }} 40 | {{- if .Values.notary.server.resources }} 41 | resources: 42 | {{ toYaml .Values.notary.server.resources | indent 10 }} 43 | {{- end }} 44 | env: 45 | - name: MIGRATIONS_PATH 46 | value: migrations/server/postgresql 47 | - name: DB_URL 48 | value: {{ template "harbor.database.notaryServer" . 
}} 49 | volumeMounts: 50 | - name: config 51 | mountPath: /etc/notary/server-config.postgres.json 52 | subPath: server.json 53 | - name: token-service-certificate 54 | mountPath: /root.crt 55 | subPath: tls.crt 56 | - name: signer-certificate 57 | mountPath: /etc/ssl/notary/ca.crt 58 | subPath: ca.crt 59 | volumes: 60 | - name: config 61 | secret: 62 | secretName: "{{ template "harbor.notary-server" . }}" 63 | - name: token-service-certificate 64 | secret: 65 | {{- if .Values.core.secretName }} 66 | secretName: {{ .Values.core.secretName }} 67 | {{- else }} 68 | secretName: {{ template "harbor.core" . }} 69 | {{- end }} 70 | - name: signer-certificate 71 | secret: 72 | {{- if .Values.notary.secretName }} 73 | secretName: {{ .Values.notary.secretName }} 74 | {{- else }} 75 | secretName: {{ template "harbor.notary-server" . }} 76 | {{- end }} 77 | {{- with .Values.notary.nodeSelector }} 78 | nodeSelector: 79 | {{ toYaml . | indent 8 }} 80 | {{- end }} 81 | {{- with .Values.notary.affinity }} 82 | affinity: 83 | {{ toYaml . | indent 8 }} 84 | {{- end }} 85 | {{- with .Values.notary.tolerations }} 86 | tolerations: 87 | {{ toYaml . | indent 8 }} 88 | {{- end }} 89 | {{ end }} 90 | -------------------------------------------------------------------------------- /harbor/harbor/templates/portal/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: "{{ template "harbor.portal" . }}" 5 | labels: 6 | {{ include "harbor.labels" . | indent 4 }} 7 | component: portal 8 | spec: 9 | replicas: {{ .Values.portal.replicas }} 10 | selector: 11 | matchLabels: 12 | {{ include "harbor.matchLabels" . | indent 6 }} 13 | component: portal 14 | template: 15 | metadata: 16 | labels: 17 | {{ include "harbor.matchLabels" . 
| indent 8 }} 18 | component: portal 19 | annotations: 20 | {{- if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "auto") }} 21 | checksum/tls: {{ include (print $.Template.BasePath "/internal/auto-tls.yaml") . | sha256sum }} 22 | {{- else if and .Values.internalTLS.enabled (eq .Values.internalTLS.certSource "manual") }} 23 | checksum/tls: {{ include (print $.Template.BasePath "/portal/tls.yaml") . | sha256sum }} 24 | {{- end }} 25 | {{- if .Values.portal.podAnnotations }} 26 | {{ toYaml .Values.portal.podAnnotations | indent 8 }} 27 | {{- end }} 28 | spec: 29 | {{- with .Values.imagePullSecrets }} 30 | imagePullSecrets: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | {{- if .Values.portal.serviceAccountName }} 34 | serviceAccountName: {{ .Values.portal.serviceAccountName }} 35 | {{- end }} 36 | containers: 37 | - name: portal 38 | image: {{ .Values.portal.image.repository }}:{{ .Values.portal.image.tag }} 39 | imagePullPolicy: {{ .Values.imagePullPolicy }} 40 | {{- if .Values.portal.resources }} 41 | resources: 42 | {{ toYaml .Values.portal.resources | indent 10 }} 43 | {{- end }} 44 | livenessProbe: 45 | httpGet: 46 | path: / 47 | scheme: {{ include "harbor.component.scheme" . | upper }} 48 | port: {{ template "harbor.portal.containerPort" . }} 49 | initialDelaySeconds: 300 50 | periodSeconds: 10 51 | readinessProbe: 52 | httpGet: 53 | path: / 54 | scheme: {{ include "harbor.component.scheme" . | upper }} 55 | port: {{ template "harbor.portal.containerPort" . }} 56 | initialDelaySeconds: 1 57 | periodSeconds: 10 58 | ports: 59 | - containerPort: {{ template "harbor.portal.containerPort" . }} 60 | volumeMounts: 61 | - name: portal-config 62 | mountPath: /etc/nginx/nginx.conf 63 | subPath: nginx.conf 64 | {{- if .Values.internalTLS.enabled }} 65 | - name: portal-internal-certs 66 | mountPath: /etc/harbor/ssl/portal 67 | {{- end }} 68 | volumes: 69 | - name: portal-config 70 | configMap: 71 | name: "{{ template "harbor.portal" . 
}}" 72 | {{- if .Values.internalTLS.enabled }} 73 | - name: portal-internal-certs 74 | secret: 75 | secretName: {{ template "harbor.internalTLS.portal.secretName" . }} 76 | {{- end }} 77 | {{- with .Values.portal.nodeSelector }} 78 | nodeSelector: 79 | {{ toYaml . | indent 8 }} 80 | {{- end }} 81 | {{- with .Values.portal.affinity }} 82 | affinity: 83 | {{ toYaml . | indent 8 }} 84 | {{- end }} 85 | {{- with .Values.portal.tolerations }} 86 | tolerations: 87 | {{ toYaml . | indent 8 }} 88 | {{- end }} 89 | -------------------------------------------------------------------------------- /consul/server/consul-server-sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: consul-server 5 | namespace: public-service 6 | spec: 7 | serviceName: consul-server 8 | replicas: 3 9 | updateStrategy: 10 | type: RollingUpdate 11 | selector: 12 | matchLabels: 13 | app: consul 14 | component: server 15 | template: 16 | metadata: 17 | labels: 18 | app: consul 19 | component: server 20 | spec: 21 | affinity: 22 | podAntiAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | - labelSelector: 25 | matchExpressions: 26 | - key: "component" # fixed typo "componment": must match the pod label "component: server" or the required anti-affinity matches no pods and servers can co-schedule on one node 27 | operator: In 28 | values: 29 | - server 30 | topologyKey: "kubernetes.io/hostname" 31 | terminationGracePeriodSeconds: 10 32 | containers: 33 | - name: consul 34 | image: consul:latest 35 | imagePullPolicy: IfNotPresent 36 | ports: 37 | - containerPort: 8500 38 | name: http 39 | - containerPort: 8600 40 | name: dns-tcp 41 | protocol: TCP 42 | - containerPort: 8600 43 | name: dns-udp 44 | protocol: UDP 45 | - containerPort: 8301 46 | name: serflan 47 | - containerPort: 8302 48 | name: serfwan 49 | - containerPort: 8300 50 | name: server 51 | env: 52 | - name: POD_IP 53 | valueFrom: 54 | fieldRef: 55 | fieldPath: status.podIP 56 | - name: NAMESPACE 57 | valueFrom: 58 | fieldRef: 59 | fieldPath: metadata.namespace 60 | args: 61 | - 
"agent" 62 | - "-server" 63 | - "-advertise=$(POD_IP)" 64 | - "-bind=0.0.0.0" 65 | - "-bootstrap-expect=3" 66 | - "-datacenter=dc1" 67 | - "-config-dir=/consul/userconfig" 68 | - "-data-dir=/consul/data" 69 | - "-disable-host-node-id" 70 | - "-domain=cluster.local" 71 | - "-retry-join=consul-server-0.consul-server.$(NAMESPACE).svc.cluster.local" 72 | - "-client=0.0.0.0" 73 | - "-ui" 74 | resources: 75 | limits: 76 | cpu: "100m" 77 | memory: "128Mi" 78 | requests: 79 | cpu: "100m" 80 | memory: "128Mi" 81 | lifecycle: 82 | preStop: 83 | exec: 84 | command: 85 | - /bin/sh 86 | - -c 87 | - consul leave 88 | volumeMounts: 89 | - name: data 90 | mountPath: /consul/data 91 | - name: user-config 92 | mountPath: /consul/userconfig 93 | volumes: 94 | - name: user-config 95 | configMap: 96 | name: consul-server-config 97 | - name: data 98 | emptyDir: {} 99 | securityContext: 100 | fsGroup: 1000 101 | 102 | # volumeClaimTemplates: 103 | # - metadata: 104 | # name: data 105 | # spec: 106 | # accessModes: 107 | # - ReadWriteMany 108 | # storageClassName: "gluster-heketi-2" 109 | # resources: 110 | # requests: 111 | # storage: 10Gi 112 | --------------------------------------------------------------------------------