├── .gitignore ├── LICENSE ├── README.md ├── addons ├── Eureka │ ├── eureka-01.yaml │ ├── eureka-02.yaml │ ├── eureka-ds.yaml │ └── eureka-init.yaml ├── Ingress-traefic │ ├── traefik-deployment.yaml │ ├── traefik-ds.yaml │ ├── traefik-rbac.yaml │ └── ui.yaml ├── Kube-dns │ └── kube-dns.yaml ├── apiserver-to-kubelet-rbac.yml ├── coredns │ ├── coredns-bak.yaml │ └── coredns.yaml ├── dashboard-bak │ ├── admin-user-sa-rbac.yaml │ ├── kubernetes-dashboard.yaml │ ├── ui-admin-rbac.yaml │ └── ui-read-rbac.yaml ├── dashboard │ ├── admin-user.yaml │ ├── kubernetes-dashboard-ingress.yaml │ └── kubernetes-dashboard.yaml ├── external-storage │ └── nfs-client │ │ ├── nfs-deployment.yaml │ │ └── nfs-rbac.yaml ├── heapster │ ├── grafana.yaml │ ├── heapster.yaml │ └── influxdb.yaml ├── ingress-prometheus.yaml ├── metrics-server │ ├── metrics-server-1.12up.yaml │ └── metrics-server-kubeadm.yaml ├── nginx-ingress-dm │ ├── ingress-controller-cm.yml │ ├── ingress-controller-ds.yml │ ├── ingress-controller-rbac.yml │ ├── ingress-controller-sa.yml │ └── ingress-controller-svc.yml ├── nginx-ingress │ ├── ingress-controller-cm.yml │ ├── ingress-controller-deploy.yml │ ├── ingress-controller-lb.yaml │ ├── ingress-controller-rbac.yml │ ├── ingress-controller-sa.yml │ └── ingress-controller-svc.yml ├── prometheus-all │ ├── 00namespace-namespace.yaml │ ├── adapter │ │ ├── prometheus-adapter-apiService.yaml │ │ ├── prometheus-adapter-clusterRole.yaml │ │ ├── prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml │ │ ├── prometheus-adapter-clusterRoleBinding.yaml │ │ ├── prometheus-adapter-clusterRoleBindingDelegator.yaml │ │ ├── prometheus-adapter-clusterRoleServerResources.yaml │ │ ├── prometheus-adapter-configMap.yaml │ │ ├── prometheus-adapter-deployment.yaml │ │ ├── prometheus-adapter-roleBindingAuthReader.yaml │ │ ├── prometheus-adapter-service.yaml │ │ └── prometheus-adapter-serviceAccount.yaml │ ├── alertmanager │ │ ├── alertmanager-alertmanager.yaml │ │ ├── alertmanager-secret.yaml │ │ ├── alertmanager-service.yaml │ │ └── alertmanager-serviceAccount.yaml │ ├── grafana │ │ ├── grafana-dashboardDatasources.yaml │ │ ├── grafana-dashboardDefinitions.yaml │ │ ├── grafana-dashboardSources.yaml │ │ ├── grafana-deployment.yaml │ │ ├── grafana-service.yaml │ │ └── grafana-serviceAccount.yaml │ ├── ingress-prometheus │ │ └── ing.yaml │ ├── kube-state-metrics │ │ ├── kube-state-metrics-clusterRole.yaml │ │ ├── kube-state-metrics-clusterRoleBinding.yaml │ │ ├── kube-state-metrics-deployment.yaml │ │ ├── kube-state-metrics-role.yaml │ │ ├── kube-state-metrics-roleBinding.yaml │ │ ├── kube-state-metrics-service.yaml │ │ └── kube-state-metrics-serviceAccount.yaml │ ├── node-exporter │ │ ├── node-exporter-clusterRole.yaml │ │ ├── node-exporter-clusterRoleBinding.yaml │ │ ├── node-exporter-daemonset.yaml │ │ ├── node-exporter-service.yaml │ │ └── node-exporter-serviceAccount.yaml │ ├── operator │ │ ├── 0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ │ ├── 0prometheus-operator-0podmonitorCustomResourceDefinition.yaml │ │ ├── 0prometheus-operator-0prometheusCustomResourceDefinition.yaml │ │ ├── 0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ │ ├── 0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ │ ├── 0prometheus-operator-clusterRole.yaml │ │ ├── 0prometheus-operator-clusterRoleBinding.yaml │ │ ├── 0prometheus-operator-deployment.yaml │ │ ├── 0prometheus-operator-service.yaml │ │ └── 0prometheus-operator-serviceAccount.yaml │ ├── prometheus │ │ ├── 
prometheus-clusterRole.yaml │ │ ├── prometheus-clusterRoleBinding.yaml │ │ ├── prometheus-prometheus.yaml │ │ ├── prometheus-roleBindingConfig.yaml │ │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ │ ├── prometheus-roleConfig.yaml │ │ ├── prometheus-roleSpecificNamespaces.yaml │ │ ├── prometheus-rules.yaml │ │ ├── prometheus-service.yaml │ │ └── prometheus-serviceAccount.yaml │ └── serviceMonitor │ │ ├── 0prometheus-operator-serviceMonitor.yaml │ │ ├── alertmanager-serviceMonitor.yaml │ │ ├── grafana-serviceMonitor.yaml │ │ ├── kube-state-metrics-serviceMonitor.yaml │ │ ├── node-exporter-serviceMonitor.yaml │ │ ├── prometheus-serviceMonitor.yaml │ │ ├── prometheus-serviceMonitorApiserver.yaml │ │ ├── prometheus-serviceMonitorCoreDNS.yaml │ │ ├── prometheus-serviceMonitorKubeControllerManager.yaml │ │ ├── prometheus-serviceMonitorKubeScheduler.yaml │ │ └── prometheus-serviceMonitorKubelet.yaml └── traefic-ingress │ ├── daemonset.yml │ ├── ingress-rbac.yml │ └── traefik-ui.yml ├── apps ├── Jenkins │ ├── jenkins-deployment.yaml │ ├── jenkins-rbd-pvc.yaml │ ├── jenkins-statefulset.yaml │ ├── jenkins-svc.yaml │ └── rbac.yaml └── myapp-http-svc.yaml ├── docs ├── HPA.md ├── Ingress-Controller.md ├── Init-centos7.md ├── Jenkins │ ├── Jenkins-dynamic-slave.MD │ ├── Jenkins-dynamic-slave.pdf │ └── images │ │ ├── k8s-slave.png │ │ └── pod-labels.png ├── Jenkinsci.md ├── OpenVPN-admin.md ├── ca-install.md ├── coredns.md ├── dashboard.md ├── etcd-install.md ├── flannel-install.md ├── heapster.md ├── helm.md ├── ingress-nginx.md ├── ingress.png ├── init.md ├── kubeadmin-install.md ├── kubernetes-pv-pvc.md ├── master-install.md ├── metallb.md ├── metrics-server.md ├── nginx-install.md ├── node-install.md ├── prometheus.md ├── secret.md ├── update-kernel.md ├── 基于Ubuntu20.04.3 LTS和kubeadm部署k8s.md └── 架构设计.md ├── example ├── cronjob.yaml ├── harbor-secret.yaml ├── job.yaml ├── k8s-demo.txt ├── nfs-pv.yaml ├── nfs-pvc.yaml ├── nginx-daemonset.yaml ├── nginx-deployment-all.yaml ├── nginx-deployment-health.yaml ├── nginx-deployment-limit.yaml ├── nginx-deployment-pvc.yaml ├── nginx-deployment.yaml ├── nginx-ingress.yaml ├── nginx-pod.yaml ├── nginx-rc.yaml ├── nginx-rs.yaml ├── nginx-service-nodeport.yaml └── nginx-service.yaml ├── images ├── Snipaste_2019-11-07_17-29-32.png ├── ingress.png ├── jenkins-1.png ├── k8s3.png ├── k8s4.png ├── metallb.png ├── nginx-ingress-cn.png ├── p1.png ├── prometheus-jg.png ├── traefik.jpg ├── wx.png └── zfb.png ├── master ├── pillar ├── k8s.sls └── top.sls ├── roster └── salt ├── k8s ├── baseset.sls ├── etcd.sls ├── master.sls ├── modules │ ├── api-server.sls │ ├── baseos.sls │ ├── ca-file-generate.sls │ ├── calico.sls │ ├── cfssl.sls │ ├── cni.sls │ ├── controller-manager.sls │ ├── docker.sls │ ├── etcd.sls │ ├── kube-proxy.sls │ ├── kubectl.sls │ ├── kubelet-bootstrap-kubeconfig.sls │ ├── kubelet.sls │ ├── nginx.sls │ └── scheduler.sls ├── node.sls └── templates │ ├── baseos │ ├── 99-prophet.conf.template │ ├── ipvs.conf.template │ ├── k8s.sysctl.conf.template │ └── kubernetes.limits.conf.template │ ├── ca │ ├── admin-csr.json.template │ ├── ca-config.json │ ├── ca-csr.json │ ├── front-proxy-client-csr.json.template │ ├── kube-controller-manager-csr.json.template │ ├── kube-proxy-csr.json.template │ ├── kube-scheduler-csr.json.template │ ├── kubernetes-csr.json.template │ └── tls-bootstrap-secret-kubeconfig.sh.template │ ├── calico │ ├── calico.yaml.template │ └── coredns.yaml.template │ ├── cni │ └── 10-flannel.conflist.template │ ├── docker │ ├── 
10-docker.conf.template │ └── daemon.json.template │ ├── etcd │ └── etcd.service.template │ ├── kube-api-server │ ├── apiserver-to-kubelet-rbac.yml.template │ ├── audit-policy.yml.template │ ├── bootstrap-token-secret.yml.template │ ├── csr-crb.yaml │ ├── encryption-config.yaml.template │ ├── kube-apiserver.service.template │ └── kubelet-bootstrap-rbac.yml.template │ ├── kube-controller-manager │ └── kube-controller-manager.service.template │ ├── kube-proxy │ ├── kube-proxy.config.yaml.template │ └── kube-proxy.service.template │ ├── kube-scheduler │ └── kube-scheduler.service.template │ ├── kubelet │ ├── 10-kubeadm.conf.template │ ├── kubelet-conf.yml.template │ └── kubelet.service.template │ └── nginx │ ├── kube-nginx.conf.template │ └── kube-nginx.service.template └── top.sls /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | .idea/ 12 | *.idea/ 13 | .idea/* 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # Environments 88 | .env 89 | .venv 90 | env/ 91 | venv/ 92 | ENV/ 93 | env.bak/ 94 | venv.bak/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 运维社区 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /addons/Eureka/eureka-01.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: eureka 5 | name: eureka-service 6 | labels: 7 | app: eureka-service 8 | spec: 9 | ports: 10 | - protocol: TCP 11 | port: 8761 12 | targetPort: 8761 13 | selector: 14 | app: pod-eureka-service 15 | type: ClusterIP 16 | clusterIP: None 17 | sessionAffinity: ClientIP 18 | 19 | --- 20 | kind: Service 21 | apiVersion: v1 22 | metadata: 23 | namespace: eureka 24 | name: service-eureka 25 | labels: 26 | app: service-eureka 27 | spec: 28 | ports: 29 | - protocol: TCP 30 | port: 8761 31 | targetPort: 8761 32 | nodePort: 20001 33 | selector: 34 | app: pod-eureka-service 35 | type: NodePort 36 | sessionAffinity: ClientIP 37 | 38 | --- 39 | kind: StatefulSet 40 | apiVersion: apps/v1beta2 41 | metadata: 42 | namespace: eureka 43 | name: statefulset-eureka-service-01 44 | labels: 45 | app: statefulset-eureka-service-01 46 | spec: 47 | serviceName: eureka-service 48 | replicas: 1 49 | selector: 50 | matchLabels: 51 | app: pod-eureka-service 52 | updateStrategy: 53 | type: RollingUpdate 54 | template: 55 | metadata: 56 | labels: 57 | app: pod-eureka-service 58 | spec: 59 | containers: 60 | - image: harbor.mofangge.net/english_word/eureka-server:test 61 | securityContext: 62 | privileged: true 63 | imagePullPolicy: IfNotPresent 64 | name: container-apollo-config-server-dev 65 | env: 66 | - name: EUREKA_INSTANCE_HOSTNAME 67 | value: "statefulset-eureka-service-01-0.eureka-service.eureka" 68 | - name: JAVA_OPTS 69 | value: "-Deureka.instance.preferIpAddress=false -Deureka.client.serviceUrl.defaultZone=http://statefulset-eureka-service-02-0.eureka-service.eureka:8761/eureka/" 70 | ports: 71 | - protocol: TCP 72 | containerPort: 8761 73 | 74 | readinessProbe: 75 | tcpSocket: 76 | port: 8761 77 | initialDelaySeconds: 10 78 | periodSeconds: 5 79 | 80 | livenessProbe: 81 | tcpSocket: 82 | port: 8761 83 | initialDelaySeconds: 120 84 | periodSeconds: 10 85 | 86 | dnsPolicy: ClusterFirst 87 | restartPolicy: Always 88 | -------------------------------------------------------------------------------- /addons/Eureka/eureka-02.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: eureka 5 | name: eureka-service 6 | labels: 7 | app: eureka-service 8 | spec: 9 | ports: 10 | - protocol: TCP 11 | port: 8761 12 | targetPort: 8761 13 | selector: 14 | app: pod-eureka-service 15 | type: ClusterIP 16 | clusterIP: None 17 | sessionAffinity: ClientIP 18 | 19 | --- 20 | kind: Service 21 | apiVersion: v1 22 | metadata: 23 | namespace: eureka 24 | name: service-eureka 25 | labels: 26 | app: service-eureka 27 | spec: 28 | ports: 29 | - protocol: TCP 30 | port: 8761 31 | targetPort: 8761 32 | nodePort: 20001 33 | selector: 34 | app: pod-eureka-service 35 | type: NodePort 36 | 
sessionAffinity: ClientIP 37 | 38 | --- 39 | kind: StatefulSet 40 | apiVersion: apps/v1beta2 41 | metadata: 42 | namespace: eureka 43 | name: statefulset-eureka-service-02 44 | labels: 45 | app: statefulset-eureka-service-02 46 | spec: 47 | serviceName: eureka-service 48 | replicas: 1 49 | selector: 50 | matchLabels: 51 | app: pod-eureka-service 52 | updateStrategy: 53 | type: RollingUpdate 54 | template: 55 | metadata: 56 | labels: 57 | app: pod-eureka-service 58 | spec: 59 | 60 | containers: 61 | - image: harbor.mofangge.net/english_word/eureka-server:test 62 | securityContext: 63 | privileged: true 64 | imagePullPolicy: IfNotPresent 65 | name: container-apollo-config-server-dev 66 | env: 67 | - name: EUREKA_INSTANCE_HOSTNAME 68 | value: "statefulset-eureka-service-02-0.eureka-service.eureka" 69 | - name: JAVA_OPTS 70 | value: "-Deureka.instance.preferIpAddress=false -Deureka.client.serviceUrl.defaultZone=http://statefulset-eureka-service-01-0.eureka-service.eureka:8761/eureka/" 71 | ports: 72 | - protocol: TCP 73 | containerPort: 8761 74 | 75 | readinessProbe: 76 | tcpSocket: 77 | port: 8761 78 | initialDelaySeconds: 10 79 | periodSeconds: 5 80 | 81 | livenessProbe: 82 | tcpSocket: 83 | port: 8761 84 | initialDelaySeconds: 120 85 | periodSeconds: 10 86 | 87 | dnsPolicy: ClusterFirst 88 | restartPolicy: Always 89 | -------------------------------------------------------------------------------- /addons/Eureka/eureka-ds.yaml: -------------------------------------------------------------------------------- 1 | #This Eureka deployment supports Prometheus auto-discovery 2 | #There are specific requirements on the Spring Cloud and Spring Boot versions; to add mutual registration, modify JAVA_OPTS 3 | # -Deureka.instance.preferIpAddress=false -Deureka.client.serviceUrl.defaultZone=${EUREKA_SERVER} 4 | --- 5 | apiVersion: v1 6 | kind: Service 7 | metadata: 8 | name: eureka 9 | labels: 10 | app: eureka 11 | spec: 12 | ports: 13 | - port: 8761 14 | name: eureka 15 | clusterIP: None 16 | selector: 17 | app: eureka 18 | --- 19 | apiVersion: apps/v1 20 | kind: StatefulSet 21 | metadata: 22 | name: eureka 23 | spec: 24 | serviceName: "eureka" 25 | replicas: 3 26 | selector: 27 | matchLabels: 28 | app: eureka 29 | template: 30 | metadata: 31 | labels: 32 | app: eureka 33 | spec: 34 | containers: 35 | - name: eureka 36 | image: freemanliu/eureka:v1.1.1 37 | ports: 38 | - containerPort: 8761 39 | resources: 40 | limits: 41 | # the JVM automatically detects this limit 42 | memory: 1Gi 43 | env: 44 | - name: MY_POD_NAME 45 | valueFrom: 46 | fieldRef: 47 | fieldPath: metadata.name 48 | - name: JAVA_OPTS 49 | value: -XX:+UnlockExperimentalVMOptions 50 | -XX:+UseCGroupMemoryLimitForHeap 51 | -XX:MaxRAMFraction=2 52 | -XX:CICompilerCount=8 53 | -XX:ActiveProcessorCount=8 54 | -XX:+UseG1GC 55 | -XX:+AggressiveOpts 56 | -XX:+UseFastAccessorMethods 57 | -XX:+UseStringDeduplication 58 | -XX:+UseCompressedOops 59 | -XX:+OptimizeStringConcat 60 | - name: EUREKA_SERVER 61 | value: "http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/" 62 | - name: EUREKA_INSTANCE_HOSTNAME 63 | value: ${MY_POD_NAME}.eureka 64 | podManagementPolicy: "Parallel" --------------------------------------------------------------------------------
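The EUREKA_SERVER peer list above is also how client applications would join this replica set: each per-pod DNS name resolves through the headless "eureka" Service. A minimal sketch of a client Deployment, assuming a hypothetical image and app named demo-client running in the same namespace as the eureka StatefulSet:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-client              # hypothetical client application
spec:
  replicas: 1
  selector:
    matchLabels:
      app: demo-client
  template:
    metadata:
      labels:
        app: demo-client
    spec:
      containers:
      - name: demo-client
        image: demo-client:latest   # hypothetical image
        env:
        # Same peer list as the EUREKA_SERVER value in eureka-ds.yaml above;
        # each entry resolves to a pod via the headless "eureka" Service.
        - name: JAVA_OPTS
          value: "-Deureka.instance.preferIpAddress=false -Deureka.client.serviceUrl.defaultZone=http://eureka-0.eureka:8761/eureka/,http://eureka-1.eureka:8761/eureka/,http://eureka-2.eureka:8761/eureka/"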
/addons/Eureka/eureka-init.yaml: -------------------------------------------------------------------------------- 1 | #Define the Service 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: eureka-service-internal 7 | labels: 8 | app: eureka-service-internal 9 | namespace: eureka 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - port: 8761 14 | protocol: TCP 15 | targetPort: 8761 16 | selector: 17 | app: pod-eureka 18 | type: ClusterIP 19 | 20 | #Define the Ingress for external access. 21 | --- 22 | apiVersion: extensions/v1beta1 23 | kind: Ingress 24 | metadata: 25 | name: eureka.mofangge.cc 26 | namespace: eureka 27 | annotations: 28 | kubernetes.io/ingress.class: "nginx" 29 | spec: 30 | tls: 31 | - secretName: mofangge.cc-eureka 32 | hosts: 33 | - eureka.mofangge.cc 34 | rules: 35 | - host: eureka.mofangge.cc 36 | http: 37 | paths: 38 | - backend: 39 | serviceName: eureka-service-internal 40 | servicePort: 8761 41 | #Define the StatefulSet (the stateful application) 42 | --- 43 | apiVersion: apps/v1beta1 44 | kind: StatefulSet 45 | metadata: 46 | name: pod-eureka 47 | namespace: eureka 48 | labels: 49 | app: pod-eureka 50 | spec: 51 | serviceName: eureka-service-internal 52 | replicas: 2 53 | selector: 54 | matchLabels: 55 | app: pod-eureka 56 | updateStrategy: 57 | type: RollingUpdate 58 | template: 59 | metadata: 60 | labels: 61 | app: pod-eureka 62 | spec: 63 | terminationGracePeriodSeconds: 10 64 | containers: 65 | - image: harbor.mofangge.net/english_word/eureka-server:test 66 | imagePullPolicy: IfNotPresent 67 | name: pod-eureka 68 | env: 69 | - name: JAVA_OPTS 70 | value: "-Deureka.instance.preferIpAddress=false -Deureka.client.serviceUrl.defaultZone=http://pod-eureka-0.eureka-service-internal.eureka:8761/eureka/,http://pod-eureka-1.eureka-service-internal.eureka:8761/eureka/" 71 | ports: 72 | - protocol: TCP 73 | containerPort: 8761 74 | readinessProbe: 75 | tcpSocket: 76 | port: 8761 77 | initialDelaySeconds: 10 78 | periodSeconds: 5 79 | 80 | livenessProbe: 81 | tcpSocket: 82 | port: 8761 83 | initialDelaySeconds: 120 84 | periodSeconds: 10 85 | dnsPolicy: ClusterFirst 86 | -------------------------------------------------------------------------------- /addons/Ingress-traefic/traefik-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: traefik-ingress-controller 6 | namespace: kube-system 7 | --- 8 | kind: Deployment 9 | apiVersion: extensions/v1beta1 10 | metadata: 11 | name: traefik-ingress-controller 12 | namespace: kube-system 13 | labels: 14 | k8s-app: traefik-ingress-lb 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | k8s-app: traefik-ingress-lb 20 | template: 21 | metadata: 22 | labels: 23 | k8s-app: traefik-ingress-lb 24 | name: traefik-ingress-lb 25 | spec: 26 | serviceAccountName: traefik-ingress-controller 27 | terminationGracePeriodSeconds: 60 28 | containers: 29 | - image: traefik 30 | name: traefik-ingress-lb 31 | ports: 32 | - name: http 33 | containerPort: 80 34 | - name: admin 35 | containerPort: 8080 36 | args: 37 | - --api 38 | - --kubernetes 39 | - --logLevel=INFO 40 | --- 41 | kind: Service 42 | apiVersion: v1 43 | metadata: 44 | name: traefik-ingress-service 45 | namespace: kube-system 46 | spec: 47 | selector: 48 | k8s-app: traefik-ingress-lb 49 | ports: 50 | - protocol: TCP 51 | port: 80 52 | name: web 53 | nodePort: 30070 54 | - protocol: TCP 55 | port: 8080 56 | name: admin 57 | nodePort: 30071 58 | type: NodePort 59 | -------------------------------------------------------------------------------- /addons/Ingress-traefic/traefik-ds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: traefik-ingress-controller 6 | namespace: kube-system 7 | --- 8 | kind: DaemonSet 9 | apiVersion: extensions/v1beta1 10 |
metadata: 11 | name: traefik-ingress-controller 12 | namespace: kube-system 13 | labels: 14 | k8s-app: traefik-ingress-lb 15 | spec: 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: traefik-ingress-lb 20 | name: traefik-ingress-lb 21 | spec: 22 | serviceAccountName: traefik-ingress-controller 23 | terminationGracePeriodSeconds: 60 24 | containers: 25 | - image: traefik 26 | name: traefik-ingress-lb 27 | ports: 28 | - name: http 29 | containerPort: 80 30 | hostPort: 80 31 | - name: admin 32 | containerPort: 8080 33 | hostPort: 8080 34 | securityContext: 35 | capabilities: 36 | drop: 37 | - ALL 38 | add: 39 | - NET_BIND_SERVICE 40 | args: 41 | - --api 42 | - --kubernetes 43 | - --logLevel=INFO 44 | --- 45 | kind: Service 46 | apiVersion: v1 47 | metadata: 48 | name: traefik-ingress-service 49 | namespace: kube-system 50 | spec: 51 | selector: 52 | k8s-app: traefik-ingress-lb 53 | type: NodePort 54 | ports: 55 | - protocol: TCP 56 | port: 80 57 | name: web 58 | nodePort: 30070 59 | - protocol: TCP 60 | port: 8080 61 | name: admin 62 | nodePort: 30071 63 | -------------------------------------------------------------------------------- /addons/Ingress-traefic/traefik-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | metadata: 5 | name: traefik-ingress-controller 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - secrets 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - extensions 19 | resources: 20 | - ingresses 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | --- 26 | kind: ClusterRoleBinding 27 | apiVersion: rbac.authorization.k8s.io/v1beta1 28 | metadata: 29 | name: traefik-ingress-controller 30 | roleRef: 31 | apiGroup: rbac.authorization.k8s.io 32 | kind: ClusterRole 33 | name: traefik-ingress-controller 34 | subjects: 35 | - kind: ServiceAccount 36 | name: traefik-ingress-controller 37 | namespace: kube-system 38 | -------------------------------------------------------------------------------- /addons/Ingress-traefic/ui.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: traefik-web-ui 6 | namespace: kube-system 7 | spec: 8 | selector: 9 | k8s-app: traefik-ingress-lb 10 | ports: 11 | - name: web 12 | port: 80 13 | targetPort: 8080 14 | --- 15 | apiVersion: extensions/v1beta1 16 | kind: Ingress 17 | metadata: 18 | name: traefik-web-ui 19 | namespace: kube-system 20 | spec: 21 | rules: 22 | - host: traefik-ui.mofangge.cc 23 | http: 24 | paths: 25 | - path: / 26 | backend: 27 | serviceName: traefik-web-ui 28 | servicePort: web 29 | -------------------------------------------------------------------------------- /addons/apiserver-to-kubelet-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: 5 | rbac.authorization.kubernetes.io/autoupdate: "true" 6 | labels: 7 | kubernetes.io/bootstrapping: rbac-defaults 8 | name: system:kube-apiserver-to-kubelet 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - nodes/proxy 14 | - nodes/stats 15 | - nodes/log 16 | - nodes/spec 17 | - nodes/metrics 18 | - pods/log 19 | verbs: 20 | - "*" 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: 
system:kube-apiserver 26 | namespace: "" 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: system:kube-apiserver-to-kubelet 31 | subjects: 32 | - apiGroup: rbac.authorization.k8s.io 33 | kind: User 34 | name: kubernetes -------------------------------------------------------------------------------- /addons/coredns/coredns-bak.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | --- 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRole 12 | metadata: 13 | labels: 14 | kubernetes.io/bootstrapping: rbac-defaults 15 | addonmanager.kubernetes.io/mode: Reconcile 16 | name: system:coredns 17 | rules: 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - endpoints 22 | - services 23 | - pods 24 | - namespaces 25 | verbs: 26 | - list 27 | - watch 28 | --- 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | kind: ClusterRoleBinding 31 | metadata: 32 | annotations: 33 | rbac.authorization.kubernetes.io/autoupdate: "true" 34 | labels: 35 | kubernetes.io/bootstrapping: rbac-defaults 36 | addonmanager.kubernetes.io/mode: EnsureExists 37 | name: system:coredns 38 | roleRef: 39 | apiGroup: rbac.authorization.k8s.io 40 | kind: ClusterRole 41 | name: system:coredns 42 | subjects: 43 | - kind: ServiceAccount 44 | name: coredns 45 | namespace: kube-system 46 | --- 47 | apiVersion: v1 48 | kind: ConfigMap 49 | metadata: 50 | name: coredns 51 | namespace: kube-system 52 | labels: 53 | addonmanager.kubernetes.io/mode: EnsureExists 54 | data: 55 | Corefile: | 56 | .:53 { 57 | errors 58 | health 59 | kubernetes cluster.local. in-addr.arpa ip6.arpa { 60 | pods insecure 61 | upstream 62 | fallthrough in-addr.arpa ip6.arpa 63 | } 64 | prometheus :9153 65 | proxy . 
/etc/resolv.conf 66 | cache 30 67 | } 68 | --- 69 | apiVersion: extensions/v1beta1 70 | kind: Deployment 71 | metadata: 72 | name: coredns 73 | namespace: kube-system 74 | labels: 75 | k8s-app: coredns 76 | kubernetes.io/cluster-service: "true" 77 | addonmanager.kubernetes.io/mode: Reconcile 78 | kubernetes.io/name: "CoreDNS" 79 | spec: 80 | replicas: 2 81 | strategy: 82 | type: RollingUpdate 83 | rollingUpdate: 84 | maxUnavailable: 1 85 | selector: 86 | matchLabels: 87 | k8s-app: coredns 88 | template: 89 | metadata: 90 | labels: 91 | k8s-app: coredns 92 | spec: 93 | serviceAccountName: coredns 94 | tolerations: 95 | - key: node-role.kubernetes.io/master 96 | effect: NoSchedule 97 | - key: "CriticalAddonsOnly" 98 | operator: "Exists" 99 | containers: 100 | - name: coredns 101 | image: coredns/coredns:1.0.6 102 | imagePullPolicy: IfNotPresent 103 | resources: 104 | limits: 105 | memory: 170Mi 106 | requests: 107 | cpu: 100m 108 | memory: 70Mi 109 | args: [ "-conf", "/etc/coredns/Corefile" ] 110 | volumeMounts: 111 | - name: config-volume 112 | mountPath: /etc/coredns 113 | ports: 114 | - containerPort: 53 115 | name: dns 116 | protocol: UDP 117 | - containerPort: 53 118 | name: dns-tcp 119 | protocol: TCP 120 | livenessProbe: 121 | httpGet: 122 | path: /health 123 | port: 8080 124 | scheme: HTTP 125 | initialDelaySeconds: 60 126 | timeoutSeconds: 5 127 | successThreshold: 1 128 | failureThreshold: 5 129 | dnsPolicy: Default 130 | volumes: 131 | - name: config-volume 132 | configMap: 133 | name: coredns 134 | items: 135 | - key: Corefile 136 | path: Corefile 137 | --- 138 | apiVersion: v1 139 | kind: Service 140 | metadata: 141 | name: coredns 142 | namespace: kube-system 143 | labels: 144 | k8s-app: coredns 145 | kubernetes.io/cluster-service: "true" 146 | addonmanager.kubernetes.io/mode: Reconcile 147 | kubernetes.io/name: "CoreDNS" 148 | spec: 149 | selector: 150 | k8s-app: coredns 151 | clusterIP: 10.96.0.2 152 | ports: 153 | - name: dns 154 | port: 53 155 | protocol: UDP 156 | - name: dns-tcp 157 | port: 53 158 | protocol: TCP 159 | -------------------------------------------------------------------------------- /addons/dashboard-bak/admin-user-sa-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kube-system 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRoleBinding 10 | metadata: 11 | name: admin-user 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: cluster-admin 16 | subjects: 17 | - kind: ServiceAccount 18 | name: admin-user 19 | namespace: kube-system 20 | 21 | -------------------------------------------------------------------------------- /addons/dashboard-bak/ui-admin-rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: ui-admin 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - services 10 | - services/proxy 11 | verbs: 12 | - '*' 13 | 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: RoleBinding 17 | metadata: 18 | name: ui-admin-binding 19 | namespace: kube-system 20 | roleRef: 21 | apiGroup: rbac.authorization.k8s.io 22 | kind: ClusterRole 23 | name: ui-admin 24 | subjects: 25 | - apiGroup: rbac.authorization.k8s.io 26 | kind: User 27 | name: admin 28 | 
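With the admin-user ServiceAccount and cluster-admin binding from admin-user-sa-rbac.yaml above applied, the dashboard login token can be read back with kubectl. A sketch, assuming the cluster auto-creates a token Secret for the ServiceAccount (the pre-1.24 default behavior):

# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

The "token:" field of that Secret is what the dashboard login screen expects.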
-------------------------------------------------------------------------------- /addons/dashboard-bak/ui-read-rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: ui-read 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - services 10 | - services/proxy 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: RoleBinding 19 | metadata: 20 | name: ui-read-binding 21 | namespace: kube-system 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: ClusterRole 25 | name: ui-read 26 | subjects: 27 | - apiGroup: rbac.authorization.k8s.io 28 | kind: User 29 | name: readonly 30 | -------------------------------------------------------------------------------- /addons/dashboard/admin-user.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: admin-user 11 | annotations: 12 | rbac.authorization.kubernetes.io/autoupdate: "true" 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: cluster-admin 17 | subjects: 18 | - kind: ServiceAccount 19 | name: admin-user 20 | namespace: kube-system 21 | 22 | --- 23 | #If you create the dashboard directly from the official file, the kubernetes-dashboard user it creates is bound to the kubernetes-dashboard-minimal role, 24 | #which has no permission to access or operate the cluster, so logging in to the dashboard reports a permission error: "configmaps is forbidden: User "system:serviceaccount:kube-system:kubernetes-dashboard"". 25 | #Therefore, adjust the RoleBinding as below to bind a more privileged role. 26 | kind: ClusterRoleBinding 27 | apiVersion: rbac.authorization.k8s.io/v1beta1 28 | metadata: 29 | name: kubernetes-dashboard 30 | subjects: 31 | - kind: ServiceAccount 32 | name: kubernetes-dashboard 33 | namespace: kube-system 34 | roleRef: 35 | kind: ClusterRole 36 | name: cluster-admin 37 | apiGroup: rbac.authorization.k8s.io 38 | -------------------------------------------------------------------------------- /addons/external-storage/nfs-client/nfs-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nfs-client-provisioner 5 | labels: 6 | app: nfs-client-provisioner 7 | # replace with namespace where provisioner is deployed 8 | namespace: default 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: nfs-client-provisioner 14 | strategy: 15 | type: Recreate 16 | template: 17 | metadata: 18 | labels: 19 | app: nfs-client-provisioner 20 | spec: 21 | serviceAccountName: nfs-client-provisioner 22 | containers: 23 | - name: nfs-client-provisioner 24 | image: quay.io/external_storage/nfs-client-provisioner:latest 25 | volumeMounts: 26 | - name: nfs-client-root 27 | mountPath: /persistentvolumes 28 | env: 29 | - name: PROVISIONER_NAME 30 | value: fuseim.pri/ifs 31 | - name: NFS_SERVER 32 | value: 10.10.10.60 33 | - name: NFS_PATH 34 | value: /ifs/kubernetes 35 | volumes: 36 | - name: nfs-client-root 37 | nfs: 38 | server: 10.10.10.60 39 | path: /ifs/kubernetes --------------------------------------------------------------------------------
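The provisioner only becomes usable once a StorageClass points at it, and the listing above does not include one. A minimal sketch (the class name managed-nfs-storage is an assumption; the provisioner string must match the PROVISIONER_NAME env var in nfs-deployment.yaml):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage      # assumed name; pick any
provisioner: fuseim.pri/ifs      # must match PROVISIONER_NAME above
parameters:
  archiveOnDelete: "false"       # delete data on PVC removal instead of archiving

PVCs then request this class via storageClassName: managed-nfs-storage.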
/addons/external-storage/nfs-client/nfs-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nfs-client-provisioner 5 | # replace with namespace where provisioner is deployed 6 | namespace: kube-system 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: nfs-client-provisioner-runner 12 | rules: 13 | - apiGroups: [""] 14 | resources: ["persistentvolumes"] 15 | verbs: ["get", "list", "watch", "create", "delete"] 16 | - apiGroups: [""] 17 | resources: ["persistentvolumeclaims"] 18 | verbs: ["get", "list", "watch", "update"] 19 | - apiGroups: ["storage.k8s.io"] 20 | resources: ["storageclasses"] 21 | verbs: ["get", "list", "watch"] 22 | - apiGroups: [""] 23 | resources: ["events"] 24 | verbs: ["watch", "create", "update", "patch"] 25 | - apiGroups: [""] 26 | resources: ["services", "endpoints"] 27 | verbs: ["get","create","list", "watch","update"] 28 | - apiGroups: ["extensions"] 29 | resources: ["podsecuritypolicies"] 30 | resourceNames: ["nfs-client-provisioner"] 31 | verbs: ["use"] 32 | --- 33 | kind: ClusterRoleBinding 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | metadata: 36 | name: run-nfs-client-provisioner 37 | subjects: 38 | - kind: ServiceAccount 39 | name: nfs-client-provisioner 40 | # replace with namespace where provisioner is deployed 41 | namespace: kube-system 42 | roleRef: 43 | kind: ClusterRole 44 | name: nfs-client-provisioner-runner 45 | apiGroup: rbac.authorization.k8s.io 46 | # --- 47 | # kind: Role 48 | # apiVersion: rbac.authorization.k8s.io/v1 49 | # metadata: 50 | # name: leader-locking-nfs-client-provisioner 51 | # # replace with namespace where provisioner is deployed 52 | # namespace: default 53 | # rules: 54 | # - apiGroups: [""] 55 | # resources: ["endpoints"] 56 | # verbs: ["get", "list", "watch", "create", "update", "patch"] 57 | # --- 58 | # kind: RoleBinding 59 | # apiVersion: rbac.authorization.k8s.io/v1 60 | # metadata: 61 | # name: leader-locking-nfs-client-provisioner 62 | # subjects: 63 | # - kind: ServiceAccount 64 | # name: nfs-client-provisioner 65 | # # replace with namespace where provisioner is deployed 66 | # namespace: default 67 | # roleRef: 68 | # kind: Role 69 | # name: leader-locking-nfs-client-provisioner 70 | # apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /addons/heapster/grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: monitoring-grafana 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | k8s-app: grafana 12 | template: 13 | metadata: 14 | labels: 15 | task: monitoring 16 | k8s-app: grafana 17 | spec: 18 | containers: 19 | - name: grafana 20 | #image: gcr.io/google_containers/heapster-grafana-amd64:v4.2.0 21 | image: mirrorgooglecontainers/heapster-grafana-amd64:v4.4.3 22 | ports: 23 | - containerPort: 3000 24 | protocol: TCP 25 | volumeMounts: 26 | - mountPath: /var 27 | name: grafana-storage 28 | env: 29 | - name: INFLUXDB_HOST 30 | value: monitoring-influxdb 31 | - name: GF_SERVER_HTTP_PORT 32 | value: "3000" 33 | # The following env variables are required to make Grafana accessible via 34 | # the kubernetes api-server proxy.
On production clusters, we recommend 35 | # removing these env variables, setup auth for grafana, and expose the grafana 36 | # service using a LoadBalancer or a public IP. 37 | - name: GF_AUTH_BASIC_ENABLED 38 | value: "false" 39 | - name: GF_AUTH_ANONYMOUS_ENABLED 40 | value: "true" 41 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 42 | value: Admin 43 | - name: GF_SERVER_ROOT_URL 44 | # If you're only using the API Server proxy, set this value instead: 45 | value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/ 46 | #value: / 47 | volumes: 48 | - name: grafana-storage 49 | emptyDir: {} 50 | --- 51 | apiVersion: v1 52 | kind: Service 53 | metadata: 54 | labels: 55 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 56 | # If you are NOT using this as an addon, you should comment out this line. 57 | kubernetes.io/cluster-service: 'true' 58 | kubernetes.io/name: monitoring-grafana 59 | name: monitoring-grafana 60 | namespace: kube-system 61 | spec: 62 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer 63 | # or through a public IP. 64 | # type: LoadBalancer 65 | # You could also use NodePort to expose the service at a randomly-generated port 66 | # type: NodePort 67 | ports: 68 | - port: 80 69 | targetPort: 3000 70 | selector: 71 | k8s-app: grafana 72 | -------------------------------------------------------------------------------- /addons/heapster/heapster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: heapster 6 | namespace: kube-system 7 | --- 8 | 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRoleBinding 11 | metadata: 12 | name: heapster 13 | subjects: 14 | - kind: ServiceAccount 15 | name: heapster 16 | namespace: kube-system 17 | roleRef: 18 | kind: ClusterRole 19 | name: system:heapster 20 | apiGroup: rbac.authorization.k8s.io 21 | --- 22 | 23 | apiVersion: apps/v1beta1 24 | kind: Deployment 25 | metadata: 26 | name: heapster 27 | namespace: kube-system 28 | spec: 29 | replicas: 1 30 | selector: 31 | matchLabels: 32 | k8s-app: heapster 33 | template: 34 | metadata: 35 | labels: 36 | task: monitoring 37 | k8s-app: heapster 38 | spec: 39 | serviceAccountName: heapster 40 | containers: 41 | - name: heapster 42 | #image: gcr.io/google_containers/heapster-amd64:v1.5.1 43 | image: mirrorgooglecontainers/heapster-amd64:v1.5.1 44 | imagePullPolicy: IfNotPresent 45 | command: 46 | - /heapster 47 | - --source=kubernetes:https://kubernetes.default 48 | - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086 49 | --- 50 | apiVersion: v1 51 | kind: Service 52 | metadata: 53 | labels: 54 | task: monitoring 55 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 56 | # If you are NOT using this as an addon, you should comment out this line. 
57 | #kubernetes.io/cluster-service: 'true' 58 | kubernetes.io/name: Heapster 59 | name: heapster 60 | namespace: kube-system 61 | spec: 62 | ports: 63 | - port: 80 64 | targetPort: 8082 65 | selector: 66 | k8s-app: heapster 67 | -------------------------------------------------------------------------------- /addons/heapster/influxdb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: monitoring-influxdb 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | k8s-app: influxdb 12 | template: 13 | metadata: 14 | labels: 15 | task: monitoring 16 | k8s-app: influxdb 17 | spec: 18 | containers: 19 | - name: influxdb 20 | #image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3 21 | image: mirrorgooglecontainers/heapster-influxdb-amd64:v1.3.3 22 | volumeMounts: 23 | - mountPath: /data 24 | name: influxdb-storage 25 | volumes: 26 | - name: influxdb-storage 27 | emptyDir: {} 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | labels: 33 | task: monitoring 34 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 35 | # If you are NOT using this as an addon, you should comment out this line. 36 | # kubernetes.io/cluster-service: 'true' 37 | kubernetes.io/name: monitoring-influxdb 38 | name: monitoring-influxdb 39 | namespace: kube-system 40 | spec: 41 | ports: 42 | - port: 8086 43 | targetPort: 8086 44 | name: http 45 | selector: 46 | k8s-app: influxdb 47 | --- 48 | 49 | -------------------------------------------------------------------------------- /addons/ingress-prometheus.yaml: -------------------------------------------------------------------------------- 1 | 2 | #kubectl -n monitoring create secret tls mofangge.cc --key server.key --cert server.crt 3 | --- 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: prometheus.com 8 | namespace: monitoring 9 | annotations: 10 | kubernetes.io/ingress.class: "nginx" 11 | spec: 12 | tls: 13 | - secretName: mofangge.cc 14 | hosts: 15 | - prometheus.mofangge.cc 16 | - grafana.mofangge.cc 17 | rules: 18 | - host: prometheus.mofangge.cc 19 | http: 20 | paths: 21 | - backend: 22 | serviceName: prometheus-k8s 23 | servicePort: 9090 24 | - host: grafana.mofangge.cc 25 | http: 26 | paths: 27 | - backend: 28 | serviceName: grafana 29 | servicePort: 3000 30 | -------------------------------------------------------------------------------- /addons/metrics-server/metrics-server-1.12up.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: system:aggregated-metrics-reader 5 | labels: 6 | rbac.authorization.k8s.io/aggregate-to-view: "true" 7 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 8 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 9 | rules: 10 | - apiGroups: ["metrics.k8s.io"] 11 | resources: ["pods"] 12 | verbs: ["get", "list", "watch"] 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1beta1 15 | kind: ClusterRoleBinding 16 | metadata: 17 | name: metrics-server:system:auth-delegator 18 | roleRef: 19 | apiGroup: rbac.authorization.k8s.io 20 | kind: ClusterRole 21 | name: system:auth-delegator 22 | subjects: 23 | - kind: ServiceAccount 24 | name: metrics-server 25 | namespace: kube-system 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1beta1 28 | kind: RoleBinding 29 | 
metadata: 30 | name: metrics-server-auth-reader 31 | namespace: kube-system 32 | roleRef: 33 | apiGroup: rbac.authorization.k8s.io 34 | kind: Role 35 | name: extension-apiserver-authentication-reader 36 | subjects: 37 | - kind: ServiceAccount 38 | name: metrics-server 39 | namespace: kube-system 40 | --- 41 | apiVersion: apiregistration.k8s.io/v1beta1 42 | kind: APIService 43 | metadata: 44 | name: v1beta1.metrics.k8s.io 45 | spec: 46 | service: 47 | name: metrics-server 48 | namespace: kube-system 49 | group: metrics.k8s.io 50 | version: v1beta1 51 | insecureSkipTLSVerify: true 52 | groupPriorityMinimum: 100 53 | versionPriority: 100 54 | --- 55 | apiVersion: v1 56 | kind: ServiceAccount 57 | metadata: 58 | name: metrics-server 59 | namespace: kube-system 60 | --- 61 | apiVersion: extensions/v1beta1 62 | kind: Deployment 63 | metadata: 64 | name: metrics-server 65 | namespace: kube-system 66 | labels: 67 | k8s-app: metrics-server 68 | spec: 69 | selector: 70 | matchLabels: 71 | k8s-app: metrics-server 72 | template: 73 | metadata: 74 | name: metrics-server 75 | labels: 76 | k8s-app: metrics-server 77 | spec: 78 | serviceAccountName: metrics-server 79 | containers: 80 | - name: metrics-server 81 | image: zhangguanzhang/gcr.io.google_containers.metrics-server-amd64:v0.3.1 82 | imagePullPolicy: IfNotPresent 83 | volumeMounts: 84 | - mountPath: /etc/kubernetes/pki 85 | name: ca-ssl 86 | command: 87 | - /metrics-server 88 | - --metric-resolution=30s 89 | - --kubelet-port=10255 90 | - --deprecated-kubelet-completely-insecure=true 91 | - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname 92 | - --requestheader-client-ca-file=/etc/kubernetes/pki/ca.pem 93 | - --requestheader-username-headers=X-Remote-User 94 | - --requestheader-group-headers=X-Remote-Group 95 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 96 | volumes: 97 | - name: ca-ssl 98 | hostPath: 99 | path: /opt/kubernetes/ssl 100 | --- 101 | apiVersion: v1 102 | kind: Service 103 | metadata: 104 | name: metrics-server 105 | namespace: kube-system 106 | labels: 107 | kubernetes.io/name: "Metrics-server" 108 | spec: 109 | selector: 110 | k8s-app: metrics-server 111 | ports: 112 | - port: 443 113 | protocol: TCP 114 | targetPort: 443 115 | --- 116 | apiVersion: rbac.authorization.k8s.io/v1 117 | kind: ClusterRole 118 | metadata: 119 | name: system:metrics-server 120 | rules: 121 | - apiGroups: 122 | - "" 123 | resources: 124 | - pods 125 | - nodes 126 | - nodes/stats 127 | - namespaces 128 | verbs: 129 | - get 130 | - list 131 | - watch 132 | - apiGroups: 133 | - "extensions" 134 | resources: 135 | - deployments 136 | verbs: 137 | - get 138 | - list 139 | - watch 140 | --- 141 | apiVersion: rbac.authorization.k8s.io/v1 142 | kind: ClusterRoleBinding 143 | metadata: 144 | name: system:metrics-server 145 | roleRef: 146 | apiGroup: rbac.authorization.k8s.io 147 | kind: ClusterRole 148 | name: system:metrics-server 149 | subjects: 150 | - kind: ServiceAccount 151 | name: metrics-server 152 | namespace: kube-system -------------------------------------------------------------------------------- /addons/metrics-server/metrics-server-kubeadm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: metrics-server:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: 
ServiceAccount 11 | name: metrics-server 12 | namespace: kube-system 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1beta1 15 | kind: RoleBinding 16 | metadata: 17 | name: metrics-server-auth-reader 18 | namespace: kube-system 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: Role 22 | name: extension-apiserver-authentication-reader 23 | subjects: 24 | - kind: ServiceAccount 25 | name: metrics-server 26 | namespace: kube-system 27 | --- 28 | apiVersion: apiregistration.k8s.io/v1beta1 29 | kind: APIService 30 | metadata: 31 | name: v1beta1.metrics.k8s.io 32 | spec: 33 | service: 34 | name: metrics-server 35 | namespace: kube-system 36 | group: metrics.k8s.io 37 | version: v1beta1 38 | insecureSkipTLSVerify: true 39 | groupPriorityMinimum: 100 40 | versionPriority: 100 41 | --- 42 | apiVersion: v1 43 | kind: ServiceAccount 44 | metadata: 45 | name: metrics-server 46 | namespace: kube-system 47 | --- 48 | apiVersion: extensions/v1beta1 49 | kind: Deployment 50 | metadata: 51 | name: metrics-server 52 | namespace: kube-system 53 | labels: 54 | k8s-app: metrics-server 55 | spec: 56 | selector: 57 | matchLabels: 58 | k8s-app: metrics-server 59 | template: 60 | metadata: 61 | name: metrics-server 62 | labels: 63 | k8s-app: metrics-server 64 | spec: 65 | serviceAccountName: metrics-server 66 | containers: 67 | - name: metrics-server 68 | image: skymyyang/metrics-server-amd64:v0.3.3 69 | imagePullPolicy: Always 70 | volumeMounts: 71 | - mountPath: /etc/kubernetes/pki 72 | name: ca-ssl 73 | command: 74 | - /metrics-server 75 | - --kubelet-insecure-tls 76 | - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname 77 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt 78 | - --requestheader-username-headers=X-Remote-User 79 | - --requestheader-group-headers=X-Remote-Group 80 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 81 | volumes: 82 | - name: ca-ssl 83 | hostPath: 84 | path: /etc/kubernetes/pki 85 | --- 86 | apiVersion: v1 87 | kind: Service 88 | metadata: 89 | name: metrics-server 90 | namespace: kube-system 91 | labels: 92 | kubernetes.io/name: "Metrics-server" 93 | spec: 94 | selector: 95 | k8s-app: metrics-server 96 | ports: 97 | - port: 443 98 | protocol: TCP 99 | targetPort: 443 100 | --- 101 | apiVersion: rbac.authorization.k8s.io/v1 102 | kind: ClusterRole 103 | metadata: 104 | name: system:metrics-server 105 | rules: 106 | - apiGroups: 107 | - "" 108 | resources: 109 | - pods 110 | - nodes 111 | - nodes/stats 112 | - namespaces 113 | verbs: 114 | - get 115 | - list 116 | - watch 117 | - apiGroups: 118 | - "extensions" 119 | resources: 120 | - deployments 121 | verbs: 122 | - get 123 | - list 124 | - watch 125 | --- 126 | apiVersion: rbac.authorization.k8s.io/v1 127 | kind: ClusterRoleBinding 128 | metadata: 129 | name: system:metrics-server 130 | roleRef: 131 | apiGroup: rbac.authorization.k8s.io 132 | kind: ClusterRole 133 | name: system:metrics-server 134 | subjects: 135 | - kind: ServiceAccount 136 | name: metrics-server 137 | namespace: kube-system 138 | -------------------------------------------------------------------------------- /addons/nginx-ingress-dm/ingress-controller-cm.yml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-configuration 5 | namespace: ingress-nginx 6 | labels: 7 | app: ingress-nginx 8 | --- 9 | kind: ConfigMap 10 | apiVersion: v1 11 | metadata: 12 | name: tcp-services 13 
| namespace: ingress-nginx 14 | data: 15 | 53: "external-dns/coredns-tcp:53" 16 | --- 17 | kind: ConfigMap 18 | apiVersion: v1 19 | metadata: 20 | name: udp-services 21 | namespace: ingress-nginx 22 | data: 23 | 53: "external-dns/coredns-udp:53" 24 | -------------------------------------------------------------------------------- /addons/nginx-ingress-dm/ingress-controller-ds.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: default-http-backend 5 | labels: 6 | app: default-http-backend 7 | namespace: ingress-nginx 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: default-http-backend 13 | template: 14 | metadata: 15 | labels: 16 | app: default-http-backend 17 | spec: 18 | terminationGracePeriodSeconds: 60 19 | containers: 20 | - name: default-http-backend 21 | image: registry.cn-hangzhou.aliyuncs.com/google_containers/defaultbackend:1.4 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | scheme: HTTP 27 | initialDelaySeconds: 30 28 | timeoutSeconds: 5 29 | ports: 30 | - containerPort: 8080 31 | resources: 32 | limits: 33 | cpu: 10m 34 | memory: 20Mi 35 | requests: 36 | cpu: 10m 37 | memory: 20Mi 38 | --- 39 | apiVersion: extensions/v1beta1 40 | kind: DaemonSet 41 | metadata: 42 | name: nginx-ingress-controller 43 | namespace: ingress-nginx 44 | spec: 45 | selector: 46 | matchLabels: 47 | app: ingress-nginx 48 | template: 49 | metadata: 50 | labels: 51 | app: ingress-nginx 52 | annotations: 53 | prometheus.io/port: '10254' 54 | prometheus.io/scrape: 'true' 55 | spec: 56 | serviceAccountName: nginx-ingress-serviceaccount 57 | nodeSelector: 58 | ingress: nginx 59 | hostNetwork: true 60 | containers: 61 | - name: nginx-ingress-controller 62 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.17.0 63 | args: 64 | - /nginx-ingress-controller 65 | - --default-backend-service=$(POD_NAMESPACE)/default-http-backend 66 | - --configmap=$(POD_NAMESPACE)/nginx-configuration 67 | - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services 68 | - --udp-services-configmap=$(POD_NAMESPACE)/udp-services 69 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx 70 | - --annotations-prefix=nginx.ingress.kubernetes.io 71 | securityContext: 72 | capabilities: 73 | drop: 74 | - ALL 75 | add: 76 | - NET_BIND_SERVICE 77 | runAsUser: 33 78 | env: 79 | - name: POD_NAME 80 | valueFrom: 81 | fieldRef: 82 | fieldPath: metadata.name 83 | - name: POD_NAMESPACE 84 | valueFrom: 85 | fieldRef: 86 | fieldPath: metadata.namespace 87 | ports: 88 | - name: http 89 | containerPort: 80 90 | - name: https 91 | containerPort: 443 92 | livenessProbe: 93 | failureThreshold: 3 94 | httpGet: 95 | path: /healthz 96 | port: 10254 97 | scheme: HTTP 98 | initialDelaySeconds: 10 99 | periodSeconds: 10 100 | successThreshold: 1 101 | timeoutSeconds: 1 102 | readinessProbe: 103 | failureThreshold: 3 104 | httpGet: 105 | path: /healthz 106 | port: 10254 107 | scheme: HTTP 108 | periodSeconds: 10 109 | successThreshold: 1 110 | timeoutSeconds: 1 111 | -------------------------------------------------------------------------------- /addons/nginx-ingress-dm/ingress-controller-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: nginx-ingress-clusterrole 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | 
- nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "extensions" 33 | resources: 34 | - ingresses 35 | verbs: 36 | - get 37 | - list 38 | - watch 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | - patch 46 | - apiGroups: 47 | - "extensions" 48 | resources: 49 | - ingresses/status 50 | verbs: 51 | - update 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1beta1 54 | kind: Role 55 | metadata: 56 | name: nginx-ingress-role 57 | namespace: ingress-nginx 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - configmaps 63 | - pods 64 | - secrets 65 | - namespaces 66 | verbs: 67 | - get 68 | - apiGroups: 69 | - "" 70 | resources: 71 | - configmaps 72 | resourceNames: 73 | - "ingress-controller-leader-nginx" 74 | verbs: 75 | - get 76 | - update 77 | - apiGroups: 78 | - "" 79 | resources: 80 | - configmaps 81 | verbs: 82 | - create 83 | - apiGroups: 84 | - "" 85 | resources: 86 | - endpoints 87 | verbs: 88 | - get 89 | --- 90 | apiVersion: rbac.authorization.k8s.io/v1beta1 91 | kind: RoleBinding 92 | metadata: 93 | name: nginx-ingress-role-nisa-binding 94 | namespace: ingress-nginx 95 | roleRef: 96 | apiGroup: rbac.authorization.k8s.io 97 | kind: Role 98 | name: nginx-ingress-role 99 | subjects: 100 | - kind: ServiceAccount 101 | name: nginx-ingress-serviceaccount 102 | namespace: ingress-nginx 103 | --- 104 | apiVersion: rbac.authorization.k8s.io/v1beta1 105 | kind: ClusterRoleBinding 106 | metadata: 107 | name: nginx-ingress-clusterrole-nisa-binding 108 | roleRef: 109 | apiGroup: rbac.authorization.k8s.io 110 | kind: ClusterRole 111 | name: nginx-ingress-clusterrole 112 | subjects: 113 | - kind: ServiceAccount 114 | name: nginx-ingress-serviceaccount 115 | namespace: ingress-nginx 116 | -------------------------------------------------------------------------------- /addons/nginx-ingress-dm/ingress-controller-sa.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nginx-ingress-serviceaccount 5 | namespace: ingress-nginx 6 | -------------------------------------------------------------------------------- /addons/nginx-ingress-dm/ingress-controller-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-nginx 5 | namespace: ingress-nginx 6 | labels: 7 | app: ingress-nginx 8 | spec: 9 | type: NodePort 10 | ports: 11 | - port: 80 12 | name: http 13 | targetPort: 80 14 | nodePort: 80 15 | - port: 443 16 | name: https 17 | targetPort: 443 18 | nodePort: 443 19 | selector: 20 | app: ingress-nginx 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: default-http-backend 26 | namespace: ingress-nginx 27 | labels: 28 | app: default-http-backend 29 | spec: 30 | ports: 31 | - port: 80 32 | targetPort: 8080 33 | selector: 34 | app: default-http-backend 35 | -------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-cm.yml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-configuration 5 | namespace: ingress-nginx 6 | labels: 7 | app:
-------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-cm.yml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-configuration 5 | namespace: ingress-nginx 6 | labels: 7 | app: ingress-nginx 8 | --- 9 | kind: ConfigMap 10 | apiVersion: v1 11 | metadata: 12 | name: tcp-services 13 | namespace: ingress-nginx 14 | data: 15 | "53": "external-dns/coredns-tcp:53" 16 | --- 17 | kind: ConfigMap 18 | apiVersion: v1 19 | metadata: 20 | name: udp-services 21 | namespace: ingress-nginx 22 | data: 23 | "53": "external-dns/coredns-udp:53" 24 | -------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-deploy.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: default-http-backend 5 | labels: 6 | app: default-http-backend 7 | namespace: ingress-nginx 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: default-http-backend 13 | template: 14 | metadata: 15 | labels: 16 | app: default-http-backend 17 | spec: 18 | terminationGracePeriodSeconds: 60 19 | containers: 20 | - name: default-http-backend 21 | image: registry.cn-hangzhou.aliyuncs.com/google_containers/defaultbackend:1.4 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | scheme: HTTP 27 | initialDelaySeconds: 30 28 | timeoutSeconds: 5 29 | ports: 30 | - containerPort: 8080 31 | resources: 32 | limits: 33 | cpu: 10m 34 | memory: 20Mi 35 | requests: 36 | cpu: 10m 37 | memory: 20Mi 38 | --- 39 | apiVersion: v1 40 | kind: Service 41 | metadata: 42 | name: default-http-backend 43 | namespace: ingress-nginx 44 | labels: 45 | app: default-http-backend 46 | spec: 47 | ports: 48 | - port: 80 49 | targetPort: 8080 50 | selector: 51 | app: default-http-backend 52 | --- 53 | apiVersion: extensions/v1beta1 54 | kind: Deployment 55 | metadata: 56 | labels: 57 | app: ingress-nginx 58 | name: nginx-ingress-controller 59 | namespace: ingress-nginx 60 | spec: 61 | replicas: 2 62 | selector: 63 | matchLabels: 64 | app: ingress-nginx 65 | template: 66 | metadata: 67 | annotations: 68 | prometheus.io/port: '10254' 69 | prometheus.io/scrape: 'true' 70 | labels: 71 | app: ingress-nginx 72 | spec: 73 | serviceAccountName: nginx-ingress-serviceaccount 74 | containers: 75 | - name: nginx-ingress-controller 76 | image: ibmcom/nginx-ingress-controller:0.19.0 77 | imagePullPolicy: IfNotPresent 78 | args: 79 | - /nginx-ingress-controller 80 | - '--default-backend-service=$(POD_NAMESPACE)/default-http-backend' 81 | - '--configmap=$(POD_NAMESPACE)/nginx-configuration' 82 | - '--tcp-services-configmap=$(POD_NAMESPACE)/tcp-services' 83 | - '--udp-services-configmap=$(POD_NAMESPACE)/udp-services' 84 | - '--annotations-prefix=nginx.ingress.kubernetes.io' 85 | - '--publish-service=$(POD_NAMESPACE)/nginx-ingress' # note: this addon's Services are named "ingress-nginx" and "nginx-ingress-lb"; the flag must reference an existing Service 86 | - '--v=2' 87 | securityContext: 88 | capabilities: 89 | add: 90 | - NET_BIND_SERVICE 91 | drop: 92 | - ALL 93 | runAsUser: 33 94 | env: 95 | - name: POD_NAME 96 | valueFrom: 97 | fieldRef: 98 | apiVersion: v1 99 | fieldPath: metadata.name 100 | - name: POD_NAMESPACE 101 | valueFrom: 102 | fieldRef: 103 | apiVersion: v1 104 | fieldPath: metadata.namespace 105 | ports: 106 | - containerPort: 80 107 | name: http 108 | protocol: TCP 109 | - containerPort: 443 110 | name: https 111 | protocol: TCP 112 | livenessProbe: 113 | failureThreshold: 3 114 | httpGet: 115 | path: /healthz 116 | port: 10254 117 | scheme: HTTP 118 | initialDelaySeconds: 10 119 | periodSeconds: 10 120 | successThreshold: 1 121 | timeoutSeconds: 1 122 | readinessProbe: 123 | failureThreshold: 3 124 | httpGet: 125 | path: /healthz 126 | port: 10254 127 | scheme: HTTP 128 |
periodSeconds: 10 129 | successThreshold: 1 130 | timeoutSeconds: 1 131 | volumeMounts: 132 | - mountPath: /etc/localtime 133 | name: localtime 134 | readOnly: true 135 | initContainers: 136 | - command: 137 | - /bin/sh 138 | - '-c' 139 | - | 140 | sysctl -w net.core.somaxconn=65535 141 | sysctl -w net.ipv4.ip_local_port_range="1024 65535" 142 | sysctl -w fs.file-max=1048576 143 | sysctl -w fs.inotify.max_user_instances=16384 144 | sysctl -w fs.inotify.max_user_watches=524288 145 | sysctl -w fs.inotify.max_queued_events=16384 146 | image: busybox:latest 147 | imagePullPolicy: Always 148 | name: init-sysctl 149 | securityContext: 150 | privileged: true 151 | nodeSelector: 152 | beta.kubernetes.io/os: linux 153 | volumes: 154 | - hostPath: 155 | path: /etc/localtime 156 | type: File 157 | name: localtime 158 | -------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-lb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | prometheus.io/scrape: "true" 6 | prometheus.io/port: "80" 7 | labels: 8 | app: nginx-ingress-lb 9 | name: nginx-ingress-lb 10 | namespace: ingress-nginx 11 | spec: 12 | ports: 13 | - name: http 14 | nodePort: 30495 15 | port: 80 16 | protocol: TCP 17 | targetPort: 80 18 | - name: https 19 | nodePort: 30301 20 | port: 443 21 | protocol: TCP 22 | targetPort: 443 23 | selector: 24 | app: ingress-nginx 25 | type: LoadBalancer 26 | -------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: nginx-ingress-clusterrole 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "extensions" 33 | resources: 34 | - ingresses 35 | verbs: 36 | - get 37 | - list 38 | - watch 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | - patch 46 | - apiGroups: 47 | - "extensions" 48 | resources: 49 | - ingresses/status 50 | verbs: 51 | - update 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1beta1 54 | kind: Role 55 | metadata: 56 | name: nginx-ingress-role 57 | namespace: ingress-nginx 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - configmaps 63 | - pods 64 | - secrets 65 | - namespaces 66 | verbs: 67 | - get 68 | - apiGroups: 69 | - "" 70 | resources: 71 | - configmaps 72 | resourceNames: 73 | - "ingress-controller-leader-nginx" 74 | verbs: 75 | - get 76 | - update 77 | - apiGroups: 78 | - "" 79 | resources: 80 | - configmaps 81 | verbs: 82 | - create 83 | - apiGroups: 84 | - "" 85 | resources: 86 | - endpoints 87 | verbs: 88 | - get 89 | --- 90 | apiVersion: rbac.authorization.k8s.io/v1beta1 91 | kind: RoleBinding 92 | metadata: 93 | name: nginx-ingress-role-nisa-binding 94 | namespace: ingress-nginx 95 | roleRef: 96 | apiGroup: rbac.authorization.k8s.io 97 | kind: Role 98 | name: nginx-ingress-role 99 | subjects: 100 | - kind: ServiceAccount 101 | name: nginx-ingress-serviceaccount 102 | namespace: ingress-nginx 
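# Note: the namespaced Role above deliberately scopes get/update to the
# "ingress-controller-leader-nginx" ConfigMap, which serves as the
# controller's leader-election lock; broader read access stays in the
# ClusterRole, and both are wired to nginx-ingress-serviceaccount by the
# bindings on either side of this comment.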
103 | --- 104 | apiVersion: rbac.authorization.k8s.io/v1beta1 105 | kind: ClusterRoleBinding 106 | metadata: 107 | name: nginx-ingress-clusterrole-nisa-binding 108 | roleRef: 109 | apiGroup: rbac.authorization.k8s.io 110 | kind: ClusterRole 111 | name: nginx-ingress-clusterrole 112 | subjects: 113 | - kind: ServiceAccount 114 | name: nginx-ingress-serviceaccount 115 | namespace: ingress-nginx 116 | -------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-sa.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nginx-ingress-serviceaccount 5 | namespace: ingress-nginx 6 | -------------------------------------------------------------------------------- /addons/nginx-ingress/ingress-controller-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-nginx 5 | namespace: ingress-nginx 6 | labels: 7 | app: ingress-nginx 8 | spec: 9 | type: LoadBalancer 10 | externalIPs: 11 | - 192.168.150.252 12 | ports: 13 | - port: 80 14 | name: http 15 | targetPort: 80 16 | - port: 443 17 | name: https 18 | targetPort: 443 19 | selector: 20 | app: ingress-nginx 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: default-http-backend 26 | namespace: ingress-nginx 27 | labels: 28 | app: default-http-backend 29 | spec: 30 | ports: 31 | - port: 80 32 | targetPort: 8080 33 | selector: 34 | app: default-http-backend 35 | -------------------------------------------------------------------------------- /addons/prometheus-all/00namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | group: metrics.k8s.io 7 | groupPriorityMinimum: 100 8 | insecureSkipTLSVerify: true 9 | service: 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | version: v1beta1 13 | versionPriority: 100 14 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-adapter 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes 10 | - namespaces 11 | - pods 12 | - services 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 6 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | name: system:aggregated-metrics-reader 9 | rules: 10 | - apiGroups: 11 | - 
metrics.k8s.io 12 | resources: 13 | - pods 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-adapter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-adapter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: resource-metrics:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-clusterRoleServerResources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: resource-metrics-server-resources 5 | rules: 6 | - apiGroups: 7 | - metrics.k8s.io 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-configMap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.yaml: | 4 | resourceRules: 5 | cpu: 6 | containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container_name!="POD",container_name!="",pod_name!=""}[1m])) by (<<.GroupBy>>) 7 | nodeQuery: sum(1 - rate(node_cpu_seconds_total{mode="idle"}[1m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>) 8 | resources: 9 | overrides: 10 | node: 11 | resource: node 12 | namespace: 13 | resource: namespace 14 | pod_name: 15 | resource: pod 16 | containerLabel: container_name 17 | memory: 18 | containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container_name!="POD",container_name!="",pod_name!=""}) by (<<.GroupBy>>) 19 | nodeQuery: sum(node:node_memory_bytes_total:sum{<<.LabelMatchers>>} - node:node_memory_bytes_available:sum{<<.LabelMatchers>>}) by (<<.GroupBy>>) 20 | resources: 21 | overrides: 22 | node: 23 | resource: node 24 | namespace: 25 | resource: namespace 26 | pod_name: 27 | resource: pod 28 | containerLabel: container_name 29 | window: 1m 30 | kind: ConfigMap 31 | metadata: 32 | name: adapter-config 33 | namespace: monitoring 34 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: 
prometheus-adapter 11 | strategy: 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | template: 16 | metadata: 17 | labels: 18 | name: prometheus-adapter 19 | spec: 20 | containers: 21 | - args: 22 | - --cert-dir=/var/run/serving-cert 23 | - --config=/etc/adapter/config.yaml 24 | - --logtostderr=true 25 | - --metrics-relist-interval=1m 26 | - --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/ 27 | - --secure-port=6443 28 | image: zhangguanzhang/quay.io.coreos.k8s-prometheus-adapter-amd64:v0.4.1 29 | name: prometheus-adapter 30 | ports: 31 | - containerPort: 6443 32 | volumeMounts: 33 | - mountPath: /tmp 34 | name: tmpfs 35 | readOnly: false 36 | - mountPath: /var/run/serving-cert 37 | name: volume-serving-cert 38 | readOnly: false 39 | - mountPath: /etc/adapter 40 | name: config 41 | readOnly: false 42 | nodeSelector: 43 | kubernetes.io/os: linux 44 | serviceAccountName: prometheus-adapter 45 | volumes: 46 | - emptyDir: {} 47 | name: tmpfs 48 | - emptyDir: {} 49 | name: volume-serving-cert 50 | - configMap: 51 | name: adapter-config 52 | name: config 53 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: resource-metrics-auth-reader 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: extension-apiserver-authentication-reader 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-adapter 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 6443 13 | selector: 14 | name: prometheus-adapter 15 | -------------------------------------------------------------------------------- /addons/prometheus-all/adapter/prometheus-adapter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus-all/alertmanager/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | baseImage: prom/alertmanager 10 | nodeSelector: 11 | kubernetes.io/os: linux 12 | replicas: 3 13 | securityContext: 14 | fsGroup: 2000 15 | runAsNonRoot: true 16 | runAsUser: 1000 17 | serviceAccountName: alertmanager-main 18 | version: v0.18.0 19 | -------------------------------------------------------------------------------- /addons/prometheus-all/alertmanager/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | alertmanager.yaml: 
Imdsb2JhbCI6CiAgInJlc29sdmVfdGltZW91dCI6ICI1bSIKInJlY2VpdmVycyI6Ci0gIm5hbWUiOiAibnVsbCIKInJvdXRlIjoKICAiZ3JvdXBfYnkiOgogIC0gImpvYiIKICAiZ3JvdXBfaW50ZXJ2YWwiOiAiNW0iCiAgImdyb3VwX3dhaXQiOiAiMzBzIgogICJyZWNlaXZlciI6ICJudWxsIgogICJyZXBlYXRfaW50ZXJ2YWwiOiAiMTJoIgogICJyb3V0ZXMiOgogIC0gIm1hdGNoIjoKICAgICAgImFsZXJ0bmFtZSI6ICJXYXRjaGRvZyIKICAgICJyZWNlaXZlciI6ICJudWxsIg== 4 | kind: Secret 5 | metadata: 6 | name: alertmanager-main 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /addons/prometheus-all/alertmanager/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /addons/prometheus-all/alertmanager/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus-all/grafana/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /addons/prometheus-all/grafana/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "", 9 | "name": "0", 10 | "options": { 11 | "path": "/grafana-dashboard-definitions/0" 12 | }, 13 | "orgId": 1, 14 | "type": "file" 15 | } 16 | ] 17 | } 18 | kind: ConfigMap 19 | metadata: 20 | name: grafana-dashboards 21 | namespace: monitoring 22 | -------------------------------------------------------------------------------- /addons/prometheus-all/grafana/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 3000 12 | targetPort: http 13 | selector: 14 | app: grafana 15 | -------------------------------------------------------------------------------- /addons/prometheus-all/grafana/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | 
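For reference, the base64 payload in alertmanager-secret.yaml above decodes to the stock kube-prometheus routing config; everything, including the Watchdog alert, is routed to a "null" receiver, so real notification targets (email, webhook, etc.) still have to be added before alerts go anywhere:

"global":
  "resolve_timeout": "5m"
"receivers":
- "name": "null"
"route":
  "group_by":
  - "job"
  "group_interval": "5m"
  "group_wait": "30s"
  "receiver": "null"
  "repeat_interval": "12h"
  "routes":
  - "match":
      "alertname": "Watchdog"
    "receiver": "null"

To change the routing, edit this YAML and re-encode it (e.g. base64 -w0) into the Secret's alertmanager.yaml key. The grafana-datasources Secret above follows the same pattern: its payload is the standard datasources.yaml pointing at http://prometheus-k8s.monitoring.svc:9090.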
-------------------------------------------------------------------------------- /addons/prometheus-all/ingress-prometheus/ing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: prometheus.com 5 | namespace: monitoring 6 | annotations: 7 | kubernetes.io/ingress.class: "nginx" 8 | spec: 9 | tls: 10 | - secretName: mofangge.cc 11 | hosts: 12 | - prometheus.mofangge.cc 13 | - grafana.mofangge.cc 14 | rules: 15 | - host: prometheus.mofangge.cc 16 | http: 17 | paths: 18 | - backend: 19 | serviceName: prometheus-k8s 20 | servicePort: 9090 21 | - host: grafana.mofangge.cc 22 | http: 23 | paths: 24 | - backend: 25 | serviceName: grafana 26 | servicePort: 3000 27 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kube-state-metrics 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | - nodes 12 | - pods 13 | - services 14 | - resourcequotas 15 | - replicationcontrollers 16 | - limitranges 17 | - persistentvolumeclaims 18 | - persistentvolumes 19 | - namespaces 20 | - endpoints 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - extensions 26 | resources: 27 | - daemonsets 28 | - deployments 29 | - replicasets 30 | - ingresses 31 | verbs: 32 | - list 33 | - watch 34 | - apiGroups: 35 | - apps 36 | resources: 37 | - statefulsets 38 | - daemonsets 39 | - deployments 40 | - replicasets 41 | verbs: 42 | - list 43 | - watch 44 | - apiGroups: 45 | - batch 46 | resources: 47 | - cronjobs 48 | - jobs 49 | verbs: 50 | - list 51 | - watch 52 | - apiGroups: 53 | - autoscaling 54 | resources: 55 | - horizontalpodautoscalers 56 | verbs: 57 | - list 58 | - watch 59 | - apiGroups: 60 | - authentication.k8s.io 61 | resources: 62 | - tokenreviews 63 | verbs: 64 | - create 65 | - apiGroups: 66 | - authorization.k8s.io 67 | resources: 68 | - subjectaccessreviews 69 | verbs: 70 | - create 71 | - apiGroups: 72 | - policy 73 | resources: 74 | - poddisruptionbudgets 75 | verbs: 76 | - list 77 | - watch 78 | - apiGroups: 79 | - certificates.k8s.io 80 | resources: 81 | - certificatesigningrequests 82 | verbs: 83 | - list 84 | - watch 85 | - apiGroups: 86 | - storage.k8s.io 87 | resources: 88 | - storageclasses 89 | verbs: 90 | - list 91 | - watch 92 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kube-state-metrics 9 | subjects: 10 | - kind: ServiceAccount 11 | name: kube-state-metrics 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: 
monitoring 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: kube-state-metrics 13 | template: 14 | metadata: 15 | labels: 16 | app: kube-state-metrics 17 | spec: 18 | containers: 19 | - args: 20 | - --logtostderr 21 | - --secure-listen-address=:8443 22 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 23 | - --upstream=http://127.0.0.1:8081/ 24 | image: zhangguanzhang/quay.io.coreos.kube-rbac-proxy:v0.4.1 25 | name: kube-rbac-proxy-main 26 | ports: 27 | - containerPort: 8443 28 | name: https-main 29 | resources: 30 | limits: 31 | cpu: 20m 32 | memory: 40Mi 33 | requests: 34 | cpu: 10m 35 | memory: 20Mi 36 | - args: 37 | - --logtostderr 38 | - --secure-listen-address=:9443 39 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 40 | - --upstream=http://127.0.0.1:8082/ 41 | image: zhangguanzhang/quay.io.coreos.kube-rbac-proxy:v0.4.1 42 | name: kube-rbac-proxy-self 43 | ports: 44 | - containerPort: 9443 45 | name: https-self 46 | resources: 47 | limits: 48 | cpu: 20m 49 | memory: 40Mi 50 | requests: 51 | cpu: 10m 52 | memory: 20Mi 53 | - args: 54 | - --host=127.0.0.1 55 | - --port=8081 56 | - --telemetry-host=127.0.0.1 57 | - --telemetry-port=8082 58 | image: skymyyang/kube-state-metrics:v1.7.1 59 | name: kube-state-metrics 60 | resources: 61 | limits: 62 | cpu: 100m 63 | memory: 150Mi 64 | requests: 65 | cpu: 100m 66 | memory: 150Mi 67 | - command: 68 | - /pod_nanny 69 | - --container=kube-state-metrics 70 | - --cpu=100m 71 | - --extra-cpu=2m 72 | - --memory=150Mi 73 | - --extra-memory=30Mi 74 | - --threshold=5 75 | - --deployment=kube-state-metrics 76 | env: 77 | - name: MY_POD_NAME 78 | valueFrom: 79 | fieldRef: 80 | apiVersion: v1 81 | fieldPath: metadata.name 82 | - name: MY_POD_NAMESPACE 83 | valueFrom: 84 | fieldRef: 85 | apiVersion: v1 86 | fieldPath: metadata.namespace 87 | image: skymyyang/addon-resizer:1.8.4 88 | name: addon-resizer 89 | resources: 90 | limits: 91 | cpu: 50m 92 | memory: 30Mi 93 | requests: 94 | cpu: 10m 95 | memory: 30Mi 96 | nodeSelector: 97 | kubernetes.io/os: linux 98 | securityContext: 99 | runAsNonRoot: true 100 | runAsUser: 65534 101 | serviceAccountName: kube-state-metrics 102 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - apiGroups: 14 | - extensions 15 | resourceNames: 16 | - kube-state-metrics 17 | resources: 18 | - deployments 19 | verbs: 20 | - get 21 | - update 22 | - apiGroups: 23 | - apps 24 | resourceNames: 25 | - kube-state-metrics 26 | resources: 27 | - deployments 28 | verbs: 29 | - get 30 | - update 31 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-roleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 
3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: kube-state-metrics 10 | subjects: 11 | - kind: ServiceAccount 12 | name: kube-state-metrics 13 | namespace: monitoring # ServiceAccount subjects require an explicit namespace; the apiserver rejects the binding without it 14 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-main 12 | port: 8443 13 | targetPort: https-main 14 | - name: https-self 15 | port: 9443 16 | targetPort: https-self 17 | selector: 18 | app: kube-state-metrics 19 | -------------------------------------------------------------------------------- /addons/prometheus-all/kube-state-metrics/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus-all/node-exporter/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /addons/prometheus-all/node-exporter/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: node-exporter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: node-exporter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: node-exporter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/node-exporter/node-exporter-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: node-exporter 12 | template: 13 | metadata: 14 | labels: 15 | app: node-exporter 16 | spec: 17 | containers: 18 | - args: 19 | - --web.listen-address=127.0.0.1:9100 20 | - --path.procfs=/host/proc 21 | - --path.sysfs=/host/sys 22 | - --path.rootfs=/host/root 23 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) 24 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ 25 | image: prom/node-exporter:v0.18.1 26 | name: node-exporter 27 | resources: 28 | limits: 29 | cpu: 250m 30 | memory: 180Mi 31 | requests: 32 | cpu: 102m 33 | memory: 180Mi 34 | volumeMounts: 35 | - mountPath: /host/proc 36 | name: proc 37
| readOnly: false 38 | - mountPath: /host/sys 39 | name: sys 40 | readOnly: false 41 | - mountPath: /host/root 42 | mountPropagation: HostToContainer 43 | name: root 44 | readOnly: true 45 | - args: 46 | - --logtostderr 47 | - --secure-listen-address=$(IP):9100 48 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 49 | - --upstream=http://127.0.0.1:9100/ 50 | env: 51 | - name: IP 52 | valueFrom: 53 | fieldRef: 54 | fieldPath: status.podIP 55 | image: zhangguanzhang/quay.io.coreos.kube-rbac-proxy:v0.4.1 56 | name: kube-rbac-proxy 57 | ports: 58 | - containerPort: 9100 59 | hostPort: 9100 60 | name: https 61 | resources: 62 | limits: 63 | cpu: 20m 64 | memory: 60Mi 65 | requests: 66 | cpu: 10m 67 | memory: 20Mi 68 | hostNetwork: true 69 | hostPID: true 70 | nodeSelector: 71 | kubernetes.io/os: linux 72 | securityContext: 73 | runAsNonRoot: true 74 | runAsUser: 65534 75 | serviceAccountName: node-exporter 76 | tolerations: 77 | - operator: Exists 78 | volumes: 79 | - hostPath: 80 | path: /proc 81 | name: proc 82 | - hostPath: 83 | path: /sys 84 | name: sys 85 | - hostPath: 86 | path: / 87 | name: root 88 | -------------------------------------------------------------------------------- /addons/prometheus-all/node-exporter/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https 12 | port: 9100 13 | targetPort: https 14 | selector: 15 | app: node-exporter 16 | -------------------------------------------------------------------------------- /addons/prometheus-all/node-exporter/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus-all/operator/0prometheus-operator-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.31.1 8 | name: prometheus-operator 9 | rules: 10 | - apiGroups: 11 | - apiextensions.k8s.io 12 | resources: 13 | - customresourcedefinitions 14 | verbs: 15 | - '*' 16 | - apiGroups: 17 | - monitoring.coreos.com 18 | resources: 19 | - alertmanagers 20 | - prometheuses 21 | - prometheuses/finalizers 22 | - alertmanagers/finalizers 23 | - servicemonitors 24 | - podmonitors 25 | - prometheusrules 26 | verbs: 27 | - '*' 28 | - apiGroups: 29 | - apps 30 | resources: 31 | - statefulsets 32 | verbs: 33 | - '*' 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - configmaps 38 | - secrets 39 | verbs: 40 | - '*' 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - pods 45 | verbs: 46 | - list 47 | - delete 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - services 52 | - services/finalizers 53 | - endpoints 54 | verbs: 55 | - get 56 | - create 57 | - update 58 | - delete 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - nodes 63 | verbs: 64 | - list 65 | - 
watch 66 | - apiGroups: 67 | - "" 68 | resources: 69 | - namespaces 70 | verbs: 71 | - get 72 | - list 73 | - watch 74 | -------------------------------------------------------------------------------- /addons/prometheus-all/operator/0prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.31.1 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | -------------------------------------------------------------------------------- /addons/prometheus-all/operator/0prometheus-operator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.31.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/component: controller 15 | app.kubernetes.io/name: prometheus-operator 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/component: controller 20 | app.kubernetes.io/name: prometheus-operator 21 | app.kubernetes.io/version: v0.31.1 22 | spec: 23 | containers: 24 | - args: 25 | - --kubelet-service=kube-system/kubelet 26 | - --logtostderr=true 27 | - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1 28 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.31.1 29 | image: zhangguanzhang/quay.io.coreos.prometheus-operator:v0.31.1 30 | name: prometheus-operator 31 | ports: 32 | - containerPort: 8080 33 | name: http 34 | resources: 35 | limits: 36 | cpu: 200m 37 | memory: 200Mi 38 | requests: 39 | cpu: 100m 40 | memory: 100Mi 41 | securityContext: 42 | allowPrivilegeEscalation: false 43 | nodeSelector: 44 | beta.kubernetes.io/os: linux 45 | securityContext: 46 | runAsNonRoot: true 47 | runAsUser: 65534 48 | serviceAccountName: prometheus-operator 49 | -------------------------------------------------------------------------------- /addons/prometheus-all/operator/0prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.31.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: http 14 | port: 8080 15 | targetPort: http 16 | selector: 17 | app.kubernetes.io/component: controller 18 | app.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /addons/prometheus-all/operator/0prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.31.1 
8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: k8s 7 | namespace: monitoring 8 | spec: 9 | alerting: 10 | alertmanagers: 11 | - name: alertmanager-main 12 | namespace: monitoring 13 | port: web 14 | baseImage: prom/prometheus 15 | nodeSelector: 16 | kubernetes.io/os: linux 17 | podMonitorSelector: {} 18 | replicas: 2 19 | resources: 20 | requests: 21 | memory: 400Mi 22 | ruleSelector: 23 | matchLabels: 24 | prometheus: k8s 25 | role: alert-rules 26 | securityContext: 27 | fsGroup: 2000 28 | runAsNonRoot: true 29 | runAsUser: 1000 30 | serviceAccountName: prometheus-k8s 31 | serviceMonitorNamespaceSelector: {} 32 | serviceMonitorSelector: {} 33 | version: v2.11.0 34 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: prometheus-k8s-config 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-k8s 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-roleBindingSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: prometheus-k8s 12 | subjects: 13 | - kind: ServiceAccount 14 | name: prometheus-k8s 15 | namespace: monitoring 16 | - apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | name: prometheus-k8s 20 | namespace: kube-system 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: Role 24 | name: prometheus-k8s 25 | subjects: 26 | - kind: ServiceAccount 27 | name: 
prometheus-k8s 28 | namespace: monitoring 29 | - apiVersion: rbac.authorization.k8s.io/v1 30 | kind: RoleBinding 31 | metadata: 32 | name: prometheus-k8s 33 | namespace: monitoring 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: Role 37 | name: prometheus-k8s 38 | subjects: 39 | - kind: ServiceAccount 40 | name: prometheus-k8s 41 | namespace: monitoring 42 | kind: RoleBindingList 43 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-roleSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - services 13 | - endpoints 14 | - pods 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiVersion: rbac.authorization.k8s.io/v1 20 | kind: Role 21 | metadata: 22 | name: prometheus-k8s 23 | namespace: kube-system 24 | rules: 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - services 29 | - endpoints 30 | - pods 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiVersion: rbac.authorization.k8s.io/v1 36 | kind: Role 37 | metadata: 38 | name: prometheus-k8s 39 | namespace: monitoring 40 | rules: 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - services 45 | - endpoints 46 | - pods 47 | verbs: 48 | - get 49 | - list 50 | - watch 51 | kind: RoleList 52 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /addons/prometheus-all/prometheus/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/0prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.31.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - honorLabels: true 13 | port: http 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: controller 17 | 
app.kubernetes.io/name: prometheus-operator 18 | app.kubernetes.io/version: v0.31.1 19 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - interval: 15s 9 | port: http 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/kube-state-metrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-state-metrics 6 | name: kube-state-metrics 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | port: https-main 14 | scheme: https 15 | scrapeTimeout: 30s 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 19 | interval: 30s 20 | port: https-self 21 | scheme: https 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | jobLabel: k8s-app 25 | selector: 26 | matchLabels: 27 | k8s-app: kube-state-metrics 28 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/node-exporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: node-exporter 6 | name: node-exporter 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https 13 | relabelings: 14 | - action: replace 15 | regex: (.*) 16 | replacement: $1 17 | sourceLabels: 18 | - __meta_kubernetes_pod_node_name 19 | targetLabel: instance 20 | scheme: https 21 | tlsConfig: 22 | insecureSkipVerify: true 23 | jobLabel: k8s-app 24 | selector: 25 | matchLabels: 26 | k8s-app: node-exporter 27 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 |
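Since prometheus-prometheus.yaml sets serviceMonitorSelector and serviceMonitorNamespaceSelector to {}, this Prometheus picks up every ServiceMonitor in every namespace, so monitoring a new workload is just one more object. A minimal sketch (all names here are hypothetical; port must match a named port on the target Service):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: myapp                # hypothetical
  namespace: monitoring
spec:
  endpoints:
  - interval: 30s
    port: http               # named Service port exposing /metrics
  namespaceSelector:
    matchNames:
    - default
  selector:
    matchLabels:
      app: myapp             # labels of the Service to scrape
--------------------------------------------------------------------------------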
/addons/prometheus-all/serviceMonitor/prometheus-serviceMonitorApiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: apiserver 6 | name: kube-apiserver 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | metricRelabelings: 13 | - action: drop 14 | regex: etcd_(debugging|disk|request|server).* 15 | sourceLabels: 16 | - __name__ 17 | - action: drop 18 | regex: apiserver_admission_controller_admission_latencies_seconds_.* 19 | sourceLabels: 20 | - __name__ 21 | - action: drop 22 | regex: apiserver_admission_step_admission_latencies_seconds_.* 23 | sourceLabels: 24 | - __name__ 25 | port: https 26 | scheme: https 27 | tlsConfig: 28 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 29 | serverName: kubernetes 30 | jobLabel: component 31 | namespaceSelector: 32 | matchNames: 33 | - default 34 | selector: 35 | matchLabels: 36 | component: apiserver 37 | provider: kubernetes 38 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/prometheus-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: coredns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/prometheus-serviceMonitorKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | metricRelabelings: 12 | - action: drop 13 | regex: etcd_(debugging|disk|request|server).* 14 | sourceLabels: 15 | - __name__ 16 | port: http-metrics 17 | jobLabel: k8s-app 18 | namespaceSelector: 19 | matchNames: 20 | - kube-system 21 | selector: 22 | matchLabels: 23 | k8s-app: kube-controller-manager 24 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/prometheus-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: http-metrics 12 | jobLabel: k8s-app 13 | namespaceSelector: 14 | matchNames: 15 | - kube-system 16 | selector: 17 | matchLabels: 18 | k8s-app: kube-scheduler 19 | -------------------------------------------------------------------------------- /addons/prometheus-all/serviceMonitor/prometheus-serviceMonitorKubelet.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: 
ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kubelet 6 | name: kubelet 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | port: https-metrics 14 | scheme: https 15 | tlsConfig: 16 | insecureSkipVerify: true 17 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 18 | honorLabels: true 19 | interval: 30s 20 | metricRelabelings: 21 | - action: drop 22 | regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) 23 | sourceLabels: 24 | - __name__ 25 | path: /metrics/cadvisor 26 | port: https-metrics 27 | scheme: https 28 | tlsConfig: 29 | insecureSkipVerify: true 30 | jobLabel: k8s-app 31 | namespaceSelector: 32 | matchNames: 33 | - kube-system 34 | selector: 35 | matchLabels: 36 | k8s-app: kubelet 37 | -------------------------------------------------------------------------------- /addons/traefic-ingress/daemonset.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: traefik-ingress-lb 5 | namespace: kube-system 6 | labels: 7 | k8s-app: traefik-ingress-lb 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | k8s-app: traefik-ingress-lb 13 | name: traefik-ingress-lb 14 | spec: 15 | terminationGracePeriodSeconds: 60 16 | hostNetwork: true 17 | restartPolicy: Always 18 | serviceAccountName: ingress 19 | containers: 20 | - image: traefik:v1.6 21 | name: traefik-ingress-lb 22 | resources: 23 | limits: 24 | cpu: 200m 25 | memory: 80Mi 26 | requests: 27 | cpu: 100m 28 | memory: 50Mi 29 | ports: 30 | - name: http 31 | containerPort: 80 32 | hostPort: 80 33 | - name: admin 34 | containerPort: 8580 35 | hostPort: 8580 36 | args: 37 | - --web 38 | - --web.address=:8580 39 | - --kubernetes 40 | nodeSelector: 41 | edgenode: "true" 42 | -------------------------------------------------------------------------------- /addons/traefic-ingress/ingress-rbac.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: ingress 5 | namespace: kube-system 6 | 7 | --- 8 | 9 | kind: ClusterRoleBinding 10 | apiVersion: rbac.authorization.k8s.io/v1beta1 11 | metadata: 12 | name: ingress 13 | subjects: 14 | - kind: ServiceAccount 15 | name: ingress 16 | namespace: kube-system 17 | roleRef: 18 | kind: ClusterRole 19 | name: cluster-admin 20 | apiGroup: rbac.authorization.k8s.io 21 | -------------------------------------------------------------------------------- /addons/traefic-ingress/traefik-ui.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: traefik-web-ui 5 | namespace: kube-system 6 | spec: 7 | selector: 8 | k8s-app: traefik-ingress-lb 9 | ports: 10 | - name: web 11 | port: 80 12 | targetPort: 8580 13 | --- 14 | apiVersion: extensions/v1beta1 15 | kind: Ingress 16 | metadata: 17 | name: traefik-web-ui 18 | namespace: kube-system 19 | spec: 20 | rules: 21 | - host: traefik-ui.local 22 | http: 23 | paths: 24 | - path: / 25 | backend: 26 | serviceName: traefik-web-ui 27 | servicePort: web 28 | -------------------------------------------------------------------------------- /apps/Jenkins/jenkins-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 
extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: jenkins 6 | namespace: kube-ops 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | app: jenkins 12 | spec: 13 | terminationGracePeriodSeconds: 10 14 | serviceAccountName: jenkins 15 | containers: 16 | - name: jenkins 17 | image: jenkins/jenkins:lts 18 | imagePullPolicy: IfNotPresent 19 | ports: 20 | - containerPort: 8080 21 | name: web 22 | protocol: TCP 23 | - containerPort: 50000 24 | name: agent 25 | protocol: TCP 26 | resources: 27 | limits: 28 | cpu: 1000m 29 | memory: 1Gi 30 | requests: 31 | cpu: 500m 32 | memory: 512Mi 33 | livenessProbe: 34 | httpGet: 35 | path: /login 36 | port: 8080 37 | initialDelaySeconds: 60 38 | timeoutSeconds: 5 39 | 40 | failureThreshold: 12 # ~2 minutes 41 | readinessProbe: 42 | httpGet: 43 | path: /login 44 | port: 8080 45 | initialDelaySeconds: 60 46 | timeoutSeconds: 5 47 | failureThreshold: 12 # ~2 minutes 48 | volumeMounts: 49 | - name: jenkinshome 50 | subPath: jenkins 51 | mountPath: /var/jenkins_home 52 | env: 53 | - name: LIMITS_MEMORY 54 | valueFrom: 55 | resourceFieldRef: 56 | resource: limits.memory 57 | divisor: 1Mi 58 | - name: JAVA_OPTS 59 | value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai 60 | securityContext: 61 | fsGroup: 1000 62 | volumes: 63 | - name: jenkinshome 64 | persistentVolumeClaim: 65 | claimName: opspvc -------------------------------------------------------------------------------- /apps/Jenkins/jenkins-rbd-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: jenkins-rbd-pvc 5 | namespace: kube-ops 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | volumeMode: Filesystem 10 | resources: 11 | requests: 12 | storage: 20Gi 13 | storageClassName: fast -------------------------------------------------------------------------------- /apps/Jenkins/jenkins-statefulset.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: StatefulSet 4 | metadata: 5 | name: jenkins 6 | namespace: kube-ops 7 | labels: 8 | app: jenkins 9 | spec: 10 | serviceName: jenkins-svc 11 | replicas: 1 12 | updateStrategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | name: jenkins 17 | labels: 18 | app: jenkins 19 | spec: 20 | terminationGracePeriodSeconds: 10 21 | serviceAccountName: jenkins 22 | containers: 23 | - name: jenkins 24 | image: jenkins/jenkins:lts 25 | imagePullPolicy: IfNotPresent 26 | ports: 27 | - containerPort: 8080 28 | name: web 29 | protocol: TCP 30 | - containerPort: 50000 31 | name: agent 32 | protocol: TCP 33 | resources: 34 | limits: 35 | cpu: 1000m 36 | memory: 1Gi 37 | requests: 38 | cpu: 500m 39 | memory: 512Mi 40 | livenessProbe: 41 | httpGet: 42 | path: /login 43 | port: 8080 44 | initialDelaySeconds: 60 45 | timeoutSeconds: 5 46 | failureThreshold: 12 47 | readinessProbe: 48 | httpGet: 49 | path: /login 50 | port: 8080 51 | initialDelaySeconds: 60 52 | timeoutSeconds: 5 53 | failureThreshold: 12 54 | volumeMounts: 55 | - name: jenkinshome 56 | subPath: jenkins 57 | mountPath: /var/jenkins_home 58 | env: 59 | - name: LIMITS_MEMORY 60 | valueFrom: 61 | resourceFieldRef: 62 | resource: limits.memory 63 | divisor: 1Mi 64 | - name: JAVA_OPTS 65 | value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm 
-Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai 66 | securityContext: 67 | fsGroup: 1000 68 | volumes: 69 | - name: jenkinshome 70 | persistentVolumeClaim: 71 | claimName: jenkins-rbd-pvc 72 | -------------------------------------------------------------------------------- /apps/Jenkins/jenkins-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: jenkins-svc 6 | namespace: kube-ops 7 | labels: 8 | app: jenkins-svc 9 | spec: 10 | selector: 11 | app: jenkins 12 | clusterIP: None 13 | ports: 14 | - name: http 15 | port: 8080 16 | targetPort: 8080 17 | - name: agent 18 | port: 50000 19 | targetPort: 50000 20 | --- 21 | apiVersion: extensions/v1beta1 22 | kind: Ingress 23 | metadata: 24 | name: jenkins-svc-ingress 25 | namespace: kube-ops 26 | annotations: 27 | kubernetes.io/ingress.class: "nginx" 28 | spec: 29 | rules: 30 | - host: jenkins.mofangge.net 31 | http: 32 | paths: 33 | - backend: 34 | serviceName: jenkins-svc 35 | servicePort: 8080 -------------------------------------------------------------------------------- /apps/Jenkins/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: jenkins 5 | namespace: kube-ops 6 | 7 | --- 8 | 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1beta1 11 | metadata: 12 | name: jenkins 13 | rules: 14 | - apiGroups: ["extensions", "apps"] 15 | resources: ["deployments"] 16 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"] 17 | - apiGroups: [""] 18 | resources: ["services"] 19 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"] 20 | - apiGroups: [""] 21 | resources: ["pods"] 22 | verbs: ["create","delete","get","list","patch","update","watch"] 23 | - apiGroups: [""] 24 | resources: ["pods/exec"] 25 | verbs: ["create","delete","get","list","patch","update","watch"] 26 | - apiGroups: [""] 27 | resources: ["pods/log"] 28 | verbs: ["get","list","watch"] 29 | - apiGroups: [""] 30 | resources: ["secrets"] 31 | verbs: ["get"] 32 | 33 | --- 34 | apiVersion: rbac.authorization.k8s.io/v1beta1 35 | kind: ClusterRoleBinding 36 | metadata: 37 | name: jenkins 38 | namespace: kube-ops 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: jenkins 43 | subjects: 44 | - kind: ServiceAccount 45 | name: jenkins 46 | namespace: kube-ops -------------------------------------------------------------------------------- /apps/myapp-http-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp 5 | labels: 6 | app: myapp 7 | spec: 8 | containers: 9 | - name: myapp 10 | image: ikubernetes/myapp:v1 11 | imagePullPolicy: IfNotPresent 12 | ports: 13 | - containerPort: 80 14 | restartPolicy: Always 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: myapp-service 20 | namespace: default 21 | spec: 22 | selector: 23 | app: myapp 24 | type: ClusterIP 25 | ports: 26 | - protocol: TCP 27 | port: 80 28 | --- 29 | 30 | apiVersion: extensions/v1beta1 31 | kind: Ingress 32 | metadata: 33 | name: myapp 34 | annotations: 35 | kubernetes.io/ingress.class: "nginx" 36 | spec: 37 | rules: 38 | - host: myapp.k8s.local 39 | http: 40 | paths: 41 | - path: / 42 | backend: 
43 |           serviceName: myapp-service
44 |           servicePort: 80
45 | 
--------------------------------------------------------------------------------
/docs/HPA.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/docs/HPA.md
--------------------------------------------------------------------------------
/docs/Jenkins/Jenkins-dynamic-slave.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/docs/Jenkins/Jenkins-dynamic-slave.pdf
--------------------------------------------------------------------------------
/docs/Jenkins/images/k8s-slave.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/docs/Jenkins/images/k8s-slave.png
--------------------------------------------------------------------------------
/docs/Jenkins/images/pod-labels.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/docs/Jenkins/images/pod-labels.png
--------------------------------------------------------------------------------
/docs/ca-install.md:
--------------------------------------------------------------------------------
1 | # Creating the CA Certificates by Hand
2 | 
3 | ## 1. Install CFSSL
4 | ```
5 | [root@linux-node1 ~]# cd /usr/local/src
6 | [root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
7 | [root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
8 | [root@linux-node1 src]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
9 | [root@linux-node1 src]# chmod +x cfssl*
10 | [root@linux-node1 src]# mv cfssl-certinfo_linux-amd64 /opt/kubernetes/bin/cfssl-certinfo
11 | [root@linux-node1 src]# mv cfssljson_linux-amd64 /opt/kubernetes/bin/cfssljson
12 | [root@linux-node1 src]# mv cfssl_linux-amd64 /opt/kubernetes/bin/cfssl
13 | Copy the cfssl binaries to the other nodes; in a real cluster every node needs its own copy.
14 | [root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* linux-node2:/opt/kubernetes/bin
15 | [root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* linux-node3:/opt/kubernetes/bin
16 | [root@linux-node1 ~]# scp /opt/kubernetes/bin/cfssl* linux-node4:/opt/kubernetes/bin
17 | ```
18 | 
19 | ## 2. Initialize cfssl
20 | ```
21 | [root@linux-node1 src]# mkdir ssl && cd ssl
22 | [root@linux-node1 ssl]# cfssl print-defaults config > config.json
23 | [root@linux-node1 ssl]# cfssl print-defaults csr > csr.json
24 | ```
25 | 
26 | ## 3. Create the JSON config used to generate the CA file
27 | 
28 | The CA config file defines the root certificate's usage profiles and concrete parameters (usages, expiry, server auth, client auth, encryption, and so on).
29 | 
30 | ```bash
31 | [root@linux-node1 ssl]# vim ca-config.json
32 | {
33 |   "signing": {
34 |     "default": {
35 |       "expiry": "876000h"
36 |     },
37 |     "profiles": {
38 |       "kubernetes": {
39 |         "usages": [
40 |           "signing",
41 |           "key encipherment",
42 |           "server auth",
43 |           "client auth"
44 |         ],
45 |         "expiry": "876000h"
46 |       }
47 |     }
48 |   }
49 | }
50 | ```
51 | - signing: this certificate may sign other certificates (the generated ca.pem carries CA=TRUE);
52 | - server auth: a client may use this certificate to verify the certificate a server presents;
53 | - client auth: a server may use this certificate to verify the certificate a client presents;
54 | - "expiry": "876000h": the validity period is set to 100 years;
55 | 
56 | ## 4. Create the JSON config for the CA certificate signing request (CSR)
57 | ```bash
58 | [root@linux-node1 ssl]# vim ca-csr.json
59 | {
60 |   "CN": "kubernetes",
61 |   "key": {
62 |     "algo": "rsa",
63 |     "size": 2048
64 |   },
65 |   "names": [
66 |     {
67 |       "C": "CN",
68 |       "ST": "BeiJing",
69 |       "L": "BeiJing",
70 |       "O": "k8s",
71 |       "OU": "System"
72 |     }
73 |   ],
74 |   "ca": {
75 |     "expiry": "876000h"
76 |   }
77 | }
78 | ```
79 | - CN (Common Name): kube-apiserver extracts this field from a certificate as the requesting User Name; browsers use it to check whether a site is legitimate;
80 | - O (Organization): kube-apiserver extracts this field from a certificate as the Group the requesting user belongs to;
81 | - kube-apiserver uses the extracted User and Group as the identity for RBAC authorization;
82 | 
83 | PS:
84 | - The CN, C, ST, L, O, OU combination must differ between the CSR files of different certificates, otherwise `PEER'S CERTIFICATE HAS AN INVALID SIGNATURE` errors may appear;
85 | - Later CSR files keep C, ST, L, O, OU identical and vary only the CN, which is enough to tell the certificates apart
86 | ## 5. Generate the CA certificate (ca.pem) and key (ca-key.pem)
87 | ```
88 | [root@linux-node1 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
89 | [root@linux-node1 ssl]# ls -l ca*
90 | -rw-r--r-- 1 root root  290 Mar  4 13:45 ca-config.json
91 | -rw-r--r-- 1 root root 1001 Mar  4 14:09 ca.csr
92 | -rw-r--r-- 1 root root  208 Mar  4 13:51 ca-csr.json
93 | -rw------- 1 root root 1679 Mar  4 14:09 ca-key.pem
94 | -rw-r--r-- 1 root root 1359 Mar  4 14:09 ca.pem
95 | ```
96 | This is the Kubernetes cluster root CA, the authority that signs the certificates of every cluster component; the corresponding files a kubeadm install generates are:
97 | 
98 | ```
99 | /etc/kubernetes/pki/ca.pem ------> /etc/kubernetes/pki/ca.crt
100 | /etc/kubernetes/pki/ca-key.pem ------> /etc/kubernetes/pki/ca.key
101 | ```
102 | 
103 | ## 6. Distribute the certificates
104 | ```
105 | # cp ca.csr ca.pem ca-key.pem ca-config.json /opt/kubernetes/ssl
106 | scp the certificates to the linux-node2, linux-node3 and linux-node4 nodes
107 | # scp ca.csr ca.pem ca-key.pem ca-config.json linux-node2:/opt/kubernetes/ssl
108 | # scp ca.csr ca.pem ca-key.pem ca-config.json linux-node3:/opt/kubernetes/ssl
109 | # scp ca.csr ca.pem ca-key.pem ca-config.json linux-node4:/opt/kubernetes/ssl
110 | ```
--------------------------------------------------------------------------------
/docs/coredns.md:
--------------------------------------------------------------------------------
1 | # Kubernetes CoreDNS
2 | 
3 | ## Deploying the cluster DNS
4 | 
5 | DNS is the first add-on a k8s cluster needs: the other pods in the cluster rely on it for name resolution, chiefly of cluster Service (SVC) names and Pod hostnames. On k8s v1.9+ there are two options, kube-dns and coredns (recommended); deploy one of the two.
6 | The config files follow the `https://github.com/kubernetes/kubernetes` project, directory `kubernetes/cluster/addons/dns`.
7 | 
8 | This project does not yet install coredns automatically; it has to be installed by hand. The config template is at `/srv/addons/coredns/coredns.yaml`; adjust the image version and settings as needed.
9 | 
10 | ## Creating CoreDNS
11 | ```bash
12 | [root@linux-node1 ~]# kubectl create -f /srv/addons/coredns/coredns.yaml
13 | 
14 | [root@linux-node1 ~]# kubectl get pod -n kube-system
15 | NAME                       READY     STATUS    RESTARTS   AGE
16 | coredns-77c989547b-9pj8b   1/1       Running   0          6m
17 | coredns-77c989547b-kncd5   1/1       Running   0          6m
18 | ```
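Once both coredns pods are Running, in-cluster resolution can be smoke-tested from a throwaway pod; a minimal sketch, assuming the default service domain:

```bash
# busybox:1.28 is used deliberately: nslookup is broken in several later tags
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default
```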
--------------------------------------------------------------------------------
/docs/dashboard.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Dashboard
2 | 
3 | ## Creating the Dashboard
4 | 
5 | Install the Dashboard only after CoreDNS has been deployed successfully.
6 | 
7 | ```bash
8 | [root@linux-node1 ~]# kubectl create -f /srv/addons/dashboard/
9 | serviceaccount/admin-user created
10 | clusterrolebinding.rbac.authorization.k8s.io/admin-user created
11 | secret/kubernetes-dashboard-certs created
12 | serviceaccount/kubernetes-dashboard created
13 | role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
14 | rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
15 | deployment.apps/kubernetes-dashboard created
16 | service/kubernetes-dashboard created
17 | ```
18 | 
19 | ## Accessing the Dashboard
20 | 
21 | https://192.168.150.141:30000
22 | 
23 | Username: admin, password: admin; pick the Token login mode.
24 | 
25 | ### Fetching the Token
26 | 
27 | ```bash
28 | [root@linux-node1 dashboard]# kubectl get svc -n kube-system
29 | NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
30 | coredns                ClusterIP   10.1.0.2       <none>        53/UDP,53/TCP   16h
31 | kubernetes-dashboard   NodePort    10.1.59.125    <none>        443:30000/TCP   4m9s
32 | metrics-server         ClusterIP   10.1.147.166   <none>        443/TCP         16h
33 | [root@linux-node1 dashboard]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
34 | Name:         admin-user-token-xqgs9
35 | Namespace:    kube-system
36 | Labels:       <none>
37 | Annotations:  kubernetes.io/service-account.name: admin-user
38 |               kubernetes.io/service-account.uid: ebc22e42-3afd-11e9-8b62-000c294df153
39 | 
40 | Type:  kubernetes.io/service-account-token
41 | 
42 | Data
43 | ====
44 | ca.crt:     1359 bytes
45 | namespace:  11 bytes
46 | token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXhxZ3M5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlYmMyMmU0Mi0zYWZkLTExZTktOGI2Mi0wMDBjMjk0ZGYxNTMiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.mIy5V1JmSFoy0CHnbk25jn3D0ajqfPaX-0WQJSzqmmXz2J9rQ-tlyrc9Sn22JkrQWqFyNaaBfbyI1tdgRgw9q8cA0kaPZCV8Q_pz7VdeLCnXoDUbpzgGm6QqdwY_42HmSkxd6GBKEZLwbPEyTTabPeml3DtvQxGEUD58TKoxUojaRUOR2DPBuwSUxPhrG8c3gN-r3p9nRtwrVdoK2DkFifFb8zcLk3uS3j4Yl_PdpEArhqdnFpg-XOg5e4-9MkIh25WOHJl0keYRenM51nUS24hLob13JvdcSTSo-IQXN6jtaAL0tL-P1RLMeMvlDRhvgSwrGOETuYmJgbVWp_7H3w
47 | 
48 | ```
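The `describe secret` output above wraps the token in other fields; for scripting, the same lookup can print the bare token, assuming the admin-user secret shown above exists:

```bash
kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/admin-user/{print $1}') \
  -o jsonpath='{.data.token}' | base64 -d; echo
```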
--------------------------------------------------------------------------------
/docs/heapster.md:
--------------------------------------------------------------------------------
1 | ## Deploying heapster
2 | 
3 | ### Deploy the heapster components
4 | 
5 | ```
6 | [root@linux-node1 ~]# kubectl create -f /srv/addons/heapster/
7 | 
8 | ```
9 | 
10 | ### View it from the Dashboard
11 | 
12 | Log in to the Dashboard and the corresponding monitoring charts appear.
--------------------------------------------------------------------------------
/docs/helm.md:
--------------------------------------------------------------------------------
1 | 1. Deploy the Helm client
2 | 
3 | ```Bash
4 | [root@linux-node1 ~]# cd /usr/local/src
5 | [root@linux-node1 src]# wget https://storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz
6 | [root@linux-node1 src]# tar zxf helm-v2.12.3-linux-amd64.tar.gz
7 | [root@linux-node1 src]# mv linux-amd64/helm /usr/local/bin/
8 | ```
9 | 
10 | 2. Initialize Helm and deploy the Tiller server
11 | 
12 | ```Bash
13 | [root@linux-node1 ~]# helm init --upgrade -i \
14 | registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.12.3 \
15 | --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
16 | ```
17 | 
18 | 3. Install the socat command on every node
19 | 
20 | ```Bash
21 | [root@linux-node1 ~]# yum install -y socat
22 | ```
23 | 
24 | 4. Verify the installation
25 | 
26 | ```Bash
27 | [root@linux-node1 ~]# helm version
28 | Client: &version.Version{SemVer:"v2.12.3", GitCommit:"eecf22f77df5f65c823aacd2dbd30ae6c65f186e", GitTreeState:"clean"}
29 | Server: &version.Version{SemVer:"v2.12.3", GitCommit:"eecf22f77df5f65c823aacd2dbd30ae6c65f186e", GitTreeState:"clean"}
30 | ```
31 | 
32 | 5. Check the helm tiller service
33 | 
34 | ```Bash
35 | [root@linux-node1 ~]# kubectl get pod --all-namespaces|grep tiller
36 | kube-system   tiller-deploy-5687f55748-qb5kg   1/1   Running   0   30s
37 | ```
38 | 
39 | 6. Deploy a first application with Helm
40 | 
41 | 6.1 Create the service account
42 | 
43 | ```Bash
44 | [root@linux-node1 ~]# kubectl create serviceaccount --namespace kube-system tiller
45 | serviceaccount "tiller" created
46 | ```
47 | 
48 | 6.2 Create the cluster role binding
49 | 
50 | ```Bash
51 | [root@linux-node1 ~]# kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
52 | clusterrolebinding.rbac.authorization.k8s.io "tiller-cluster-rule" created
53 | ```
54 | 
55 | 6.3 Set the serviceAccount on the tiller deployment
56 | 
57 | ```Bash
58 | [root@linux-node1 ~]# kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
59 | deployment.extensions "tiller-deploy" patched
60 | ```
61 | 
62 | 6.4 Search for Helm charts
63 | 
64 | ```Bash
65 | [root@linux-node1 ~]# helm search jenkins
66 | NAME            CHART VERSION   APP VERSION   DESCRIPTION
67 | stable/jenkins  0.13.5          2.73          Open source continuous integration server. It s...
68 | 
69 | 
70 | [root@linux-node1 ~]# helm repo list
71 | NAME    URL
72 | stable  https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
73 | local   http://127.0.0.1:8879/charts
74 | 
75 | [root@linux-node1 ~]# helm install stable/jenkins
76 | ```
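`helm install stable/jenkins` gives the release a random name; with Helm v2 a release name and target namespace can be set explicitly. A sketch where `my-jenkins` and `kube-ops` are placeholder values:

```bash
helm install --name my-jenkins --namespace kube-ops stable/jenkins
helm ls                         # the release should be listed as DEPLOYED
helm delete --purge my-jenkins  # remove the release again when finished
```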
--------------------------------------------------------------------------------
/docs/ingress-nginx.md:
--------------------------------------------------------------------------------
1 | ## About Services (the resource)
2 | 
3 | - A Service is a layer-4 load balancer
4 | - Proxy modes: Userspace, Iptables, Ipvs
5 | - Five types
6 |   1. ClusterIP: reachable only inside the cluster, by pods and nodes; it cannot take traffic from outside the cluster.
7 |   2. NodePort
8 | 
9 | ```
10 | Access path
11 | Client --> NodeIP:NodePort --> ClusterIP:SvcPort --> PodIP:containerPort
12 | A load balancer is usually put in front of the NodePort.
13 | ```
14 |   3. LoadBalancer
15 |   4. ExternalName
16 |   5. No ClusterIP - Headless Service: `ServiceName --> PodIP`
17 | 
18 | 
19 | 
20 | ## About Ingress Controllers and Ingress
21 | 
22 | - An Ingress Controller is a controller with layer-7 proxying ability; it itself runs as Pod resources inside the cluster.
23 |   - Nginx
24 |   - Traefik
25 |   - Envoy
26 | 
27 | - Ingress is also one of the standard kubernetes resource types: a set of rules that forward requests to a given service by DNS name or URL path, used to route request traffic from outside the cluster to services inside it.
28 | 
29 | - Ingress is an abstraction in Kubernetes that forwards by domain name to internal Services, borrowing the Web Server notion of a Virtual Host. This avoids the limits that come with the NodePort and LoadBalancer Service types (such as the cap on port numbers). The Ingress feature is realized by an Ingress Controller, which watches the Ingress and Service objects in the Kubernetes API and, whenever those resources change, reconfigures the Web Server to match the desired state.
30 | 
31 | - The basic logic an Ingress Controller implements is:
32 | 
33 | 1. watch the apiserver and fetch all ingress definitions
34 | 2. from those definitions, generate the configuration Nginx needs (/etc/nginx/nginx.conf)
35 | 3. run nginx -s reload to reload the contents of nginx.conf
36 | 
37 | ![How it works](../images/ingress.png)
38 | ## Deployment
39 | 
40 | ```bash
41 | kubectl create ns ingress-nginx
42 | # Note: change the externalIPs address in ingress-controller-svc.yml first; it is the address of your own LB.
43 | kubectl apply -f /srv/addons/nginx-ingress/
44 | ```
45 | 
46 | ## Verification
47 | 
48 | ```bash
49 | [root@k8s-master01 nginx-ingress]# kubectl -n ingress-nginx get po,svc
50 | NAME                                            READY   STATUS    RESTARTS   AGE
51 | pod/default-http-backend-774686c976-m929c       1/1     Running   0          6m17s
52 | pod/nginx-ingress-controller-7fc9fc4457-9cvdd   1/1     Running   0          6m17s
53 | pod/nginx-ingress-controller-7fc9fc4457-gf76d   1/1     Running   0          6m17s
54 | 
55 | NAME                           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
56 | service/default-http-backend   ClusterIP   10.93.113.133   <none>        80/TCP    8m15s
57 | 
58 | [root@k8s-master01 ~]# curl http://10.93.113.133
59 | default backend - 404
60 | 
61 | # Once the steps above check out, build the simple Myapp with kubectl to test the function; MetalLB may need to be configured first
62 | [root@linux-node1 apps]# kubectl apply -f /srv/apps/myapp-http-svc.yaml
63 | pod/myapp unchanged
64 | service/myapp-service unchanged
65 | ingress.extensions/myapp unchanged
66 | # Test access
67 | [root@linux-node1 apps]# curl http://192.168.150.252 -H 'Host: myapp.k8s.local'
68 | Hello MyApp | Version: v1 | Pod Name
69 | ```
70 | 
71 | ## Tuning the Ingress service
72 | 
73 | - The ingress-controller is one of the ways the cluster exposes services externally, so we can pin its cluster IP to 10.1.159.254 to make it easy to remember and find.
74 | - If a service's externalTrafficPolicy is Local, the nodeport only answers on nodes that actually run a pod backing that service.
75 | - kubectl uncordon <mastername> lets the master nodes take part in scheduling again.
76 | - The yaml files tune the nginx-ingress kernel parameters through initContainers.
77 | 
78 | ## Scaling out the ingress-controller
79 | 
80 | ## Combining a `daemonset` with `hostNetwork`
81 | 
82 | - An `Ingress` is a set of rules that forward requests to a given `service` by DNS name or URL path, routing traffic from outside the cluster to services inside it.
83 | - The `ingress-controller` itself has to be exposed outside the cluster through a `cluster-svc` or a `nodePort`, which adds extra hops of forwarding between services and extra network overhead.
84 | - Instead, we can deploy the `Ingress-Controller` as a `Daemonset` that shares the `node` host network, and drop the controller's `svc` entirely. There is no extra network cost: the front-end `nginx` simply forwards to port `80` or `443` on the chosen `node`s.
85 | - When deploying the `Daemonset`, select the target nodes with a `labels` selector.
86 | - This also gets an internal `kubernetes` cluster out of the awkward position of having no LB.
87 | 
88 | ## How to deploy
89 | - For the details of the changes, see the `ingress-controller.yml` and `ingress-controller-svc.yml` files under `/srv/addons/nginx-ingress-dm/`.
90 | 
91 | - The key changes
92 | 
93 | ```yaml
94 | nodeSelector:
95 |   ingress: nginx
96 | hostNetwork: true
97 | ```
98 | - Deploy
99 | 
100 | ```Bash
101 | # First label the nodes that should run the ingress-controller; ports 80 and 443 must be free on both.
102 | kubectl label nodes linux-node1 ingress=nginx
103 | kubectl label nodes linux-node3 ingress=nginx
104 | # Remove the previously deployed ingress first
105 | kubectl delete -f /srv/addons/nginx-ingress/
106 | # Then apply the new files
107 | kubectl apply -f /srv/addons/nginx-ingress-dm/
108 | ```
109 | 
110 | PS:
111 | Off the public-cloud platforms that kubernetes integrates with, the default ADDRESS shows up empty. To display the node IP, add the --report-node-internal-ip-address=true flag, or change the source in flags.go: in useNodeInternalIP = flags.Bool("report-node-internal-ip-address", false, change false to true.
--------------------------------------------------------------------------------
/docs/ingress.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/docs/ingress.png
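Two of the tuning notes above can be made concrete: `externalTrafficPolicy` is patchable on a live Service, while `clusterIP` is immutable and has to be set in `ingress-controller-svc.yml` before the Service is first created. A sketch, where the Service name `ingress-nginx` is a placeholder for the name used in the svc file:

```bash
# After this, only nodes running a controller pod answer on the NodePort
kubectl -n ingress-nginx patch svc ingress-nginx -p '{"spec":{"externalTrafficPolicy":"Local"}}'
# clusterIP (e.g. 10.1.159.254) cannot be patched afterwards; set it in the yaml and re-create
```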
--------------------------------------------------------------------------------
/docs/metrics-server.md:
--------------------------------------------------------------------------------
1 | # Kubernetes metrics-server
2 | 
3 | Metrics Server is the component that implements the Metrics API. It aims to replace Heapster as the source of usage metrics for Pods and Nodes, collecting them from the Summary API exposed by the Kubelet on every Kubernetes node.
4 | - The Horizontal Pod Autoscaler (HPA) controller scales Pods automatically based on CPU usage
5 | - The HPA controller polls on the interval set by the Master's kube-controller-manager flag --horizontal-pod-autoscaler-sync-period (30 seconds by default), monitors the CPU usage of the target Pods, and when the conditions are met adjusts the replica count of the ReplicationController or Deployment to match the user-defined average Pod CPU usage.
6 | - In newer kubernetes versions, Pod CPU usage no longer comes from heapster but from metrics-server.
7 | - The yml files come from github: https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy/1.8+
8 | - The /etc/kubernetes/pki/ca.pem file comes from the kubernetes cluster deployment
9 | - The yml files need modifications before use; see the files themselves for the changes.
10 | - The API-related configuration flags are:
11 | 
12 | ```bash
13 | --requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \
14 | --requestheader-allowed-names= \
15 | --requestheader-extra-headers-prefix="X-Remote-Extra-" \
16 | --requestheader-group-headers=X-Remote-Group \
17 | --requestheader-username-headers=X-Remote-User \
18 | --proxy-client-cert-file=/opt/kubernetes/ssl/metrics-server.pem \
19 | --proxy-client-key-file=/opt/kubernetes/ssl/metrics-server-key.pem \
20 | ```
21 | 
22 | ## Creating metrics-server
23 | 
24 | ```bash
25 | [root@linux-node1 ~]# kubectl apply -f /srv/addons/metrics-server/metrics-server-1.12up.yaml
26 | clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
27 | clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
28 | rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
29 | apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
30 | serviceaccount/metrics-server created
31 | deployment.extensions/metrics-server created
32 | service/metrics-server created
33 | clusterrole.rbac.authorization.k8s.io/system:metrics-server created
34 | clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
35 | ```
36 | 
37 | ## Checking the pod status
38 | 
39 | ```bash
40 | [root@linux-node1 ~]# kubectl -n kube-system get po -l k8s-app=metrics-server
41 | NAME                              READY   STATUS    RESTARTS   AGE
42 | metrics-server-79b544fd7b-tkh8m   1/1     Running   0          14h
43 | ```
44 | 
45 | ## Collecting Metrics: check with kubectl top
46 | 
47 | ```bash
48 | [root@linux-node1 ~]# kubectl get --raw /apis/metrics.k8s.io/v1beta1
49 | {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}
50 | 
51 | 
52 | [root@linux-node1 ~]# kubectl get apiservice|grep metrics
53 | v1beta1.metrics.k8s.io   kube-system/metrics-server   True   14h
54 | 
55 | [root@linux-node1 ~]# kubectl top node
56 | NAME          CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
57 | linux-node1   128m         6%     884Mi           47%
58 | linux-node2   133m         6%     1032Mi          55%
59 | linux-node3   184m         9%     983Mi           52%
60 | linux-node4   29m          2%     343Mi           39%
61 | ```
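With the APIService registered, pod metrics and autoscaling are available too; a short follow-up, where `nginx-deployment` is a placeholder for any Deployment with CPU requests set:

```bash
kubectl top pod -n kube-system    # per-pod usage from the same data source
kubectl autoscale deployment nginx-deployment --cpu-percent=50 --min=1 --max=5
kubectl get hpa                   # the HPA is driven by metrics-server from here on
```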
--------------------------------------------------------------------------------
/docs/nginx-install.md:
--------------------------------------------------------------------------------
1 | ## HA for kube-apiserver through an nginx proxy
2 | 
3 | - The control-plane kube-controller-manager and kube-scheduler run as multiple instances, so as long as one instance is healthy they stay highly available
4 | - Pods inside the cluster reach kube-apiserver through the domain name kubernetes; kube-dns resolves it to the IPs of several kube-apiserver nodes, so that path is highly available too
5 | - kubelet, kube-proxy, controller-manager and scheduler reach kube-apiserver through a local kube-nginx (listening on 127.0.0.1), which is what makes kube-apiserver highly available to them
6 | - kube-nginx health-checks and load-balances across all kube-apiserver instances
7 | - nginx has to be installed on every node
8 | 
9 | ## Download and build nginx
10 | 
11 | - Fetch the source
12 | 
13 | ```Bash
14 | cd /usr/local/src
15 | wget http://nginx.org/download/nginx-1.15.3.tar.gz
16 | tar -xzvf nginx-1.15.3.tar.gz
17 | ```
18 | 
19 | - Configure the build
20 | 
21 | `--with-stream` : enable layer-4 transparent forwarding (TCP Proxy)
22 | 
23 | `--without-xxx` : switch every other feature off, so the resulting dynamically linked binary has minimal dependencies
24 | 
25 | ```Bash
26 | mkdir /opt/kubernetes/kube-nginx
27 | cd /usr/local/src/nginx-1.15.3
28 | ./configure --with-stream --without-http --prefix=/opt/kubernetes/kube-nginx --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
29 | make && make install
30 | ```
31 | - Verify the build
32 | 
33 | ```Bash
34 | /opt/kubernetes/kube-nginx/sbin/nginx -v
35 | ldd /opt/kubernetes/kube-nginx/sbin/nginx
36 | ## Since only layer-4 transparent forwarding is enabled, the binary depends on nothing beyond core OS libs such as libc (no libz, libssl and the like), which makes it easy to deploy on any OS version
37 | ```
38 | 
39 | ## The `kube-nginx.conf` config file
40 | 
41 | - Configure nginx for layer-4 transparent forwarding; replace the server list in backend to match the kube-apiservers of your cluster
42 | 
43 | ```Bash
44 | cat > /opt/kubernetes/kube-nginx/conf/kube-nginx.conf <<'EOF'
45 | worker_processes 1;
46 | events {
47 |     worker_connections  1024;
48 | }
49 | stream {
50 |     upstream backend {
51 |         hash $remote_addr consistent;
52 |         server 192.168.200.181:6443 max_fails=3 fail_timeout=30s;
53 |         server 192.168.200.182:6443 max_fails=3 fail_timeout=30s;
54 |         server 192.168.200.183:6443 max_fails=3 fail_timeout=30s;
55 |     }
56 |     server {
57 |         listen 127.0.0.1:8443;
58 |         proxy_connect_timeout 1s;
59 |         proxy_pass backend;
60 |     }
61 | }
62 | EOF
63 | ```
64 | 
65 | ## The systemd unit `kube-nginx.service`
66 | 
67 | ```Bash
68 | cat > /usr/lib/systemd/system/kube-nginx.service <<EOF
69 | [Unit]
70 | Description=kube-apiserver nginx proxy
71 | After=network.target
72 | After=network-online.target
73 | Wants=network-online.target
74 | 
75 | [Service]
76 | Type=forking
77 | ExecStartPre=/opt/kubernetes/kube-nginx/sbin/nginx -c /opt/kubernetes/kube-nginx/conf/kube-nginx.conf -p /opt/kubernetes/kube-nginx -t
78 | ExecStart=/opt/kubernetes/kube-nginx/sbin/nginx -c /opt/kubernetes/kube-nginx/conf/kube-nginx.conf -p /opt/kubernetes/kube-nginx
79 | ExecReload=/opt/kubernetes/kube-nginx/sbin/nginx -c /opt/kubernetes/kube-nginx/conf/kube-nginx.conf -p /opt/kubernetes/kube-nginx -s reload
80 | PrivateTmp=true
81 | Restart=always
82 | RestartSec=5
83 | StartLimitInterval=0
84 | LimitNOFILE=65536
85 | 
86 | [Install]
87 | WantedBy=multi-user.target
88 | EOF
89 | 
90 | systemctl daemon-reload && systemctl enable --now kube-nginx
91 | ```
--------------------------------------------------------------------------------
/docs/Init-centos7.md:
--------------------------------------------------------------------------------
1 | # Initialize the system configuration to suit running docker and k8s
2 | 
3 | #### Disable the firewall and SELinux on every machine
4 | 
5 | ```bash
6 | systemctl disable --now firewalld NetworkManager postfix
7 | setenforce 0
8 | sed -ri '/^[^#]*SELINUX=/s#=.+$#=disabled#' /etc/selinux/config
9 | ```
10 | #### Turn off the swap partition
11 | 
12 | ```bash
13 | swapoff -a && sysctl -w vm.swappiness=0
14 | sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
15 | ```
16 | 
17 | #### If a GUI environment is enabled, consider disabling dnsmasq (optional)
18 | 
19 | With dnsmasq enabled (as in GUI environments), linux sets the system DNS Server to 127.0.0.1, which leaves docker containers unable to resolve domain names, so turn it off.
20 | 
21 | ```bash
22 | systemctl disable --now dnsmasq
23 | ```
24 | #### Configure the time-sync client
25 | 
26 | ```bash
27 | yum install chrony -y
28 | cat <<EOF > /etc/chrony.conf
29 | server ntp.aliyun.com iburst
30 | stratumweight 0
31 | driftfile /var/lib/chrony/drift
32 | rtcsync
33 | makestep 10 3
34 | bindcmdaddress 127.0.0.1
35 | bindcmdaddress ::1
36 | keyfile /etc/chrony.keys
37 | commandkey 1
38 | generatecommandkey
39 | logchange 0.5
40 | logdir /var/log/chrony
41 | EOF
42 | 
43 | systemctl restart chronyd
44 | systemctl enable --now chronyd
45 | ```
46 | 
47 | 
48 | #### Upgrade the kernel and configure the Aliyun saltstack yum repo
49 | 
50 | ```bash
51 | yum install wget git jq psmisc vim -y
52 | wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
53 | yum install https://mirrors.aliyun.com/saltstack/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
54 | sed -i "s/repo.saltstack.com/mirrors.aliyun.com\/saltstack/g" /etc/yum.repos.d/salt-latest.repo
55 | ```
56 | 
57 | - Because the kernels shipped by today's package managers are old, installing docker on either centos or ubuntu hits the bug below; it is still present on 4.15 kernels.
58 | 
59 | ```
60 | kernel:unregister_netdevice: waiting for lo to become free. Usage count = 1
61 | ```
62 | 
63 | - Upgrading the kernel is recommended; refusing to just invites more problems.
64 | 
65 | ```bash
66 | # perl is a dependency of the kernel packages; install it if missing
67 | [ ! -f /usr/bin/perl ] && yum install perl -y
68 | # Upgrading the kernel uses the elrepo yum repo; import the elrepo key and install the repo first
69 | rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
70 | rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
71 | # List the available kernels
72 | yum --disablerepo="*" --enablerepo="elrepo-kernel" list available --showduplicates
73 | # In the yum ELRepo repo, mainline is the newest kernel; install kernel
74 | 
75 | # ipvs depends on the nf_conntrack_ipv4 kernel module, renamed nf_conntrack in 4.19 and later; kube-proxy's code never added the check and still uses nf_conntrack_ipv4, so install a kernel older than 4.19 here;
76 | # other archived versions can be downloaded from the links below
77 | ubuntu http://kernel.ubuntu.com/~kernel-ppa/mainline/
78 | RHEL http://mirror.rc.usf.edu/compute_lock/elrepo/kernel/el7/x86_64/RPMS/
79 | ```
80 | 
81 | - Installing a kernel version of your choice
82 | 
83 | ```bash
84 | export Kernel_Version=4.18.9-1
85 | wget http://mirror.rc.usf.edu/compute_lock/elrepo/kernel/el7/x86_64/RPMS/kernel-ml{,-devel}-${Kernel_Version}.el7.elrepo.x86_64.rpm
86 | yum localinstall -y kernel-ml*
87 | 
88 | # Check whether this kernel ships the module
89 | find /lib/modules -name '*nf_conntrack_ipv4*' -type f
90 | ```
91 | - Fix the kernel boot order: the default entry should be 1, but an upgraded kernel is inserted in front as 0 (skip this step if you pick the kernel by hand at every boot)
92 | 
93 | ```bash
94 | grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
95 | # Confirm the default kernel now points at the one installed above
96 | grubby --default-kernel
97 | ```
98 | - Docker's official kernel check script recommends (RHEL7/CentOS7: User namespaces disabled; add 'user_namespace.enable=1' to boot command line); enable it with:
99 | 
100 | ```bash
101 | grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
102 | # Reboot into the new kernel
103 | reboot
104 | ```
105 | 
106 | 
107 | #### Check whether the kernel and modules suit running docker (linux only)
108 | 
109 | ```bash
110 | curl https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh > check-config.sh
111 | bash ./check-config.sh
112 | ```
113 | 
114 | #### `/etc/hosts` must resolve every cluster host
115 | 
116 | ```
117 | 192.168.150.141 linux-node1
118 | 192.168.150.142 linux-node2
119 | 192.168.150.143 linux-node3
120 | 192.168.150.144 linux-node4
121 | ```
--------------------------------------------------------------------------------
/docs/架构设计.md:
--------------------------------------------------------------------------------
1 | # Architecture design
2 | 
3 | ## 1. Directory layout
4 | 
5 | All files live under the /opt/kubernetes directory
6 | ```
7 | [root@linux-node1 ~]# tree -L 1 /opt/kubernetes/
8 | /opt/kubernetes/
9 | ├── bin   # binaries
10 | ├── cfg   # config files
11 | ├── log   # log files
12 | └── ssl   # certificate files
13 | 
14 | ```
15 | 
16 | #### A note kept here as a backup
17 | 5.5 Deployment can still be done with `salt-ssh '*' state.highstate`, but that path carries a BUG.
18 | 
19 | ```bash
20 | [root@linux-node1 ~]# salt-ssh '*' state.highstate
21 | ```
22 | The node-side flannel will then fail to start: salt-ssh runs the node states first, so flannel cannot write the user and token when it generates flanneld.kubeconfig. Running the commands below fixes the BUG.
23 | 
24 | ```Bash
25 | [root@linux-node1 ~]# /bin/bash /opt/kubernetes/bin/flannelkubeconfig.sh
26 | [root@linux-node1 ~]# systemctl restart flannel
27 | ```
28 | 
29 | 
30 | Taint the Master nodes so that PODs are scheduled onto them only as a last resort.
31 | 
32 | Look up taints for the details of how they work.
33 | 
34 | ```bash
35 | kubectl describe node linux-node1
36 | kubectl taint node linux-node1 node-role.kubernetes.io/master=linux-node1:PreferNoSchedule
37 | kubectl taint node linux-node2 node-role.kubernetes.io/master=linux-node2:PreferNoSchedule
38 | kubectl taint node linux-node3 node-role.kubernetes.io/master=linux-node3:PreferNoSchedule
39 | ```
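A quick way to confirm the taints landed, assuming the node names used above:

```bash
kubectl describe node linux-node1 | grep -A1 Taints
# Expected: node-role.kubernetes.io/master=linux-node1:PreferNoSchedule
```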
| schedule: "*/1 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: hello-word 13 | image: alpine 14 | command: ["echo", "CronJob"] 15 | restartPolicy: OnFailure 16 | 17 | -------------------------------------------------------------------------------- /example/harbor-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: harbor-secret 5 | namespace: default 6 | data: 7 | .dockerconfigjson: ewoJImF1dGhzIjogewoJCSIxOTIuMTY4LjU2LjExIjogewoJCQkiYXV0aCI6ICJZV1J0YVc0NlpHVjJiM0J6WldSMUxtTnZiUT09IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOC4wNi4wLWNlIChsaW51eCkiCgl9Cn0= 8 | type: kubernetes.io/dockerconfigjson 9 | -------------------------------------------------------------------------------- /example/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: job-demo 5 | spec: 6 | template: 7 | metadata: 8 | name: job-demo 9 | spec: 10 | containers: 11 | - name: hello-world 12 | image: alpine 13 | command: ["echo", "Hello World!"] 14 | restartPolicy: Never 15 | 16 | -------------------------------------------------------------------------------- /example/k8s-demo.txt: -------------------------------------------------------------------------------- 1 | 2 | 1.创建deployment 3 | kubectl create -f nginx-deployment.yaml 4 | 5 | 2.查看deployment 6 | kubectl get deployment 7 | 8 | 3.查看Pod 9 | kubectl get pod -o wide 10 | 11 | 4.测试Pod访问 12 | curl --head 10.2.83.17 13 | 14 | 5.更新Deployment 15 | kubectl set image deployment/nginx-deployment nginx=nginx:1.12.2 --record 16 | 17 | 6.查看更新后的Deployment 18 | kubectl get deployment -o wide 19 | 20 | 7.查看更新历史 21 | kubectl rollout history deployment/nginx-deployment 22 | 23 | 8.查看具体某一个版本的升级历史 24 | kubectl rollout history deployment/nginx-deployment --revision=1 25 | 26 | 9.快速回滚到上一个版本 27 | kubectl rollout undo deployment/nginx-deployment 28 | 29 | 10.扩容到5个节点 30 | kubectl scale deployment nginx-deployment --replicas 5 -------------------------------------------------------------------------------- /example/nfs-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-demo 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | storageClassName: nfs 12 | nfs: 13 | path: /data/k8s-nfs/pv-demo 14 | server: 192.168.56.11 15 | -------------------------------------------------------------------------------- /example/nfs-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-demo 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | storageClassName: nfs 12 | -------------------------------------------------------------------------------- /example/nginx-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: nginx-daemonset 5 | labels: 6 | app: nginx 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: nginx 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx:1.13.12 19 | ports: 20 | - containerPort: 80 21 
--------------------------------------------------------------------------------
/example/nginx-daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 |   name: nginx-daemonset
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   selector:
9 |     matchLabels:
10 |       app: nginx
11 |   template:
12 |     metadata:
13 |       labels:
14 |         app: nginx
15 |     spec:
16 |       containers:
17 |       - name: nginx
18 |         image: nginx:1.13.12
19 |         ports:
20 |         - containerPort: 80
21 | 
--------------------------------------------------------------------------------
/example/nginx-deployment-all.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: nginx-deployment
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   replicas: 3
9 |   selector:
10 |     matchLabels:
11 |       app: nginx
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: nginx
16 |     spec:
17 |       containers:
18 |       - name: nginx
19 |         image: nginx:1.13.12
20 |         ports:
21 |         - containerPort: 80
22 |         readinessProbe:
23 |           httpGet:
24 |             scheme: HTTP
25 |             path: /healthy
26 |             port: 80
27 |           initialDelaySeconds: 10
28 |           periodSeconds: 5
29 |         resources:
30 |           requests:
31 |             memory: "64Mi"
32 |             cpu: "250m"
33 |           limits:
34 |             memory: "128Mi"
35 |             cpu: "500m"
36 |         volumeMounts:
37 |         - mountPath: "/usr/share/nginx/html"
38 |           name: pvc-demo
39 |       volumes:
40 |       - name: pvc-demo
41 |         persistentVolumeClaim:
42 |           claimName: pvc-demo
43 | 
--------------------------------------------------------------------------------
/example/nginx-deployment-health.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: nginx-deployment
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   replicas: 3
9 |   selector:
10 |     matchLabels:
11 |       app: nginx
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: nginx
16 |     spec:
17 |       containers:
18 |       - name: nginx
19 |         image: nginx:1.13.12
20 |         ports:
21 |         - containerPort: 80
22 |         livenessProbe:
23 |           exec:
24 |             command:
25 |             - /bin/sh
26 |             - -c
27 |             - ps aux | grep nginx
28 |           initialDelaySeconds: 10
29 |           periodSeconds: 5
30 |           timeoutSeconds: 3
31 |         readinessProbe:
32 |           httpGet:
33 |             scheme: HTTP
34 |             path: /healthy
35 |             port: 80
36 |           initialDelaySeconds: 10
37 |           periodSeconds: 5
--------------------------------------------------------------------------------
/example/nginx-deployment-limit.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: nginx-deployment
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   replicas: 3
9 |   selector:
10 |     matchLabels:
11 |       app: nginx
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: nginx
16 |     spec:
17 |       containers:
18 |       - name: nginx
19 |         image: nginx:1.13.12
20 |         ports:
21 |         - containerPort: 80
22 |         resources:
23 |           requests:
24 |             memory: "64Mi"
25 |             cpu: "250m"
26 |           limits:
27 |             memory: "128Mi"
28 |             cpu: "500m"
--------------------------------------------------------------------------------
/example/nginx-deployment-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: nginx-deployment
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   replicas: 3
9 |   selector:
10 |     matchLabels:
11 |       app: nginx
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: nginx
16 |     spec:
17 |       containers:
18 |       - name: nginx
19 |         image: nginx:1.13.12
20 |         ports:
21 |         - containerPort: 80
22 |         volumeMounts:
23 |         - mountPath: "/usr/share/nginx/html"
24 |           name: pvc-demo
25 |       volumes:
26 |       - name: pvc-demo
27 |         persistentVolumeClaim:
28 |           claimName: pvc-demo
--------------------------------------------------------------------------------
/example/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: nginx-deployment
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   replicas: 3
9 |   selector:
10 |     matchLabels:
11 |       app: nginx
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: nginx
16 |     spec:
17 |       containers:
18 |       - name: nginx
19 |         image: nginx:1.13.12
20 |         ports:
21 |         - containerPort: 80
22 | 
--------------------------------------------------------------------------------
/example/nginx-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 |   name: nginx-ingress
5 | spec:
6 |   rules:
7 |   - host: www.example.com
8 |     http:
9 |       paths:
10 |       - path: /
11 |         backend:
12 |           serviceName: nginx-service
13 |           servicePort: 80
14 | 
--------------------------------------------------------------------------------
/example/nginx-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: nginx-pod
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   containers:
9 |   - name: nginx
10 |     image: nginx:1.13.12
11 |     ports:
12 |     - containerPort: 80
13 | 
--------------------------------------------------------------------------------
/example/nginx-rc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ReplicationController
3 | metadata:
4 |   name: nginx-rc
5 | spec:
6 |   replicas: 3
7 |   selector:
8 |     app: nginx
9 |   template:
10 |     metadata:
11 |       name: nginx
12 |       labels:
13 |         app: nginx
14 |     spec:
15 |       containers:
16 |       - name: nginx
17 |         image: nginx:1.13.12
18 |         ports:
19 |         - containerPort: 80
20 | 
--------------------------------------------------------------------------------
/example/nginx-rs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: ReplicaSet
3 | metadata:
4 |   name: nginx-rs
5 |   labels:
6 |     app: nginx
7 | spec:
8 |   replicas: 3
9 |   selector:
10 |     matchLabels:
11 |       app: nginx
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: nginx
16 |     spec:
17 |       containers:
18 |       - name: nginx
19 |         image: nginx:1.13.12
20 |         ports:
21 |         - containerPort: 80
22 | 
--------------------------------------------------------------------------------
/example/nginx-service-nodeport.yaml:
--------------------------------------------------------------------------------
1 | kind: Service
2 | apiVersion: v1
3 | metadata:
4 |   name: nginx-service
5 | spec:
6 |   selector:
7 |     app: nginx
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 80
12 |   type: NodePort
13 | 
--------------------------------------------------------------------------------
/example/nginx-service.yaml:
--------------------------------------------------------------------------------
1 | kind: Service
2 | apiVersion: v1
3 | metadata:
4 |   name: nginx-service
5 | spec:
6 |   selector:
7 |     app: nginx
8 |   ports:
9 |   - protocol: TCP
10 |     port: 80
11 |     targetPort: 80
--------------------------------------------------------------------------------
/images/Snipaste_2019-11-07_17-29-32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/Snipaste_2019-11-07_17-29-32.png
--------------------------------------------------------------------------------
/images/ingress.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/ingress.png
--------------------------------------------------------------------------------
/images/jenkins-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/jenkins-1.png
--------------------------------------------------------------------------------
/images/k8s3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/k8s3.png
--------------------------------------------------------------------------------
/images/k8s4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/k8s4.png
--------------------------------------------------------------------------------
/images/metallb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/metallb.png
--------------------------------------------------------------------------------
/images/nginx-ingress-cn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/nginx-ingress-cn.png
--------------------------------------------------------------------------------
/images/p1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/p1.png
--------------------------------------------------------------------------------
/images/prometheus-jg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/prometheus-jg.png
--------------------------------------------------------------------------------
/images/traefik.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/traefik.jpg
--------------------------------------------------------------------------------
/images/wx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/wx.png
--------------------------------------------------------------------------------
/images/zfb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skymyyang/salt-k8s-ha/29386230c598b799cab18c171ce385c1667423cd/images/zfb.png
--------------------------------------------------------------------------------
/pillar/k8s.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #********************************************
3 | # Author: skymyyang
4 | # Email: yang-li@live.cn
5 | # Organization: https://www.cnblogs.com/skymyyang/
6 | # Description: Kubernetes Config with Pillar
7 | #********************************************
8 | 
9 | # Set the Master IP addresses (must be changed)
10 | MASTER_IP_M1: "192.168.200.181"
11 | MASTER_IP_M2: "192.168.200.182"
12 | MASTER_IP_M3: "192.168.200.183"
13 | # Set the Masters' full FQDN hostnames (must be changed)
14 | MASTER_H1: "c8-node1.example.com"
15 | MASTER_H2: "c8-node2.example.com"
16 | MASTER_H3: "c8-node3.example.com"
17 | 
18 | # Reverse-proxy address and port for KUBE-APISERVER
19 | #KUBE_APISERVER: "https://127.0.0.1:8443"
20 | KUBE_APISERVER: "https://server.k8s.local:8443"
21 | KUBE_APISERVER_DNS_NAME: "server.k8s.local"
22 | 
23 | # Set the ETCD cluster client endpoints (must be changed)
24 | ETCD_ENDPOINTS: "http://192.168.200.181:2379,http://192.168.200.182:2379,http://192.168.200.183:2379"
25 | 
26 | FLANNEL_ETCD_PREFIX: "/kubernetes/network"
27 | 
28 | # Set the ETCD cluster initial member list (must be changed)
29 | ETCD_CLUSTER: "etcd-node1=http://192.168.200.181:2380,etcd-node2=http://192.168.200.182:2380,etcd-node3=http://192.168.200.183:2380"
30 | 
31 | # The local IP is taken automatically from the Grains FQDN; make sure the hostname resolves to the local IP
32 | NODE_IP: {{ grains['fqdn_ip4'][0] }}
33 | HOST_NAME: {{ grains['fqdn'] }}
34 | 
35 | # Set the BOOTSTRAP TOKEN; you can generate your own
36 | BOOTSTRAP_TOKEN: "be8dad.da8a699a46edc482"
37 | TOKEN_ID: "be8dad"
38 | TOKEN_SECRET: "da8a699a46edc482"
39 | ENCRYPTION_KEY: "8eVtmpUpYjMvH8wKZtKCwQPqYRqM14yvtXPLJdhu0gA="
40 | 
41 | # Service IP range
42 | SERVICE_CIDR: "10.96.0.0/16"
43 | 
44 | # Kubernetes Service IP (pre-allocated from SERVICE_CIDR)
45 | CLUSTER_KUBERNETES_SVC_IP: "10.96.0.1"
46 | 
47 | # Kubernetes DNS Service IP (pre-allocated from SERVICE_CIDR)
48 | CLUSTER_DNS_SVC_IP: "10.96.0.2"
49 | 
50 | # NodePort range
51 | NODE_PORT_RANGE: "30000-32767"
52 | 
53 | # Pod IP range; defines cluster-cidr in kube-controller-manager
54 | POD_CIDR: "10.244.0.0/16"
55 | CLUSTER_CIDR: "10.244.0.0/16"
56 | 
57 | # Cluster DNS domain
58 | CLUSTER_DNS_DOMAIN: "cluster.local."
59 | # Commented out, no longer needed
60 | # Docker Registry address
61 | #DOCKER_REGISTRY: "https://192.168.150.135:5000"
62 | # Master VIP address (must be changed)
63 | #MASTER_VIP: "192.168.150.253"
64 | 
65 | # NIC name, be sure to change this
66 | VIP_IF: "ens192"
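BOOTSTRAP_TOKEN is the usual `id.secret` pair for kubelet TLS bootstrapping and ENCRYPTION_KEY a base64-encoded 32-byte value; one way to mint fresh values, assuming openssl is available:

```bash
TOKEN_ID=$(openssl rand -hex 3)       # 6 hex chars
TOKEN_SECRET=$(openssl rand -hex 8)   # 16 hex chars
echo "${TOKEN_ID}.${TOKEN_SECRET}"    # use as BOOTSTRAP_TOKEN
head -c 32 /dev/urandom | base64      # use as ENCRYPTION_KEY
```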
--------------------------------------------------------------------------------
/pillar/top.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: iokubernetes
4 | # Email: yang-li@live.cn
5 | # Organization: iokubernetes.github.io
6 | # Description: Pillar Top File
7 | #******************************************
8 | 
9 | base:
10 |   '*':
11 |     - k8s
--------------------------------------------------------------------------------
/roster:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: skymyyang
4 | # Email: yang-li@live.cn
5 | # Organization: https://www.cnblogs.com/skymyyang/
6 | # Description: Salt SSH Roster
7 | #******************************************
8 | 
9 | c8-node1:
10 |   host: 192.168.200.181
11 |   user: root
12 |   priv: /root/.ssh/id_rsa
13 |   minion_opts:
14 |     grains:
15 |       worker-role: node
16 |       etcd-role: node
17 |       etcd-name: etcd-node1
18 |       ca-file-role: admin
19 |       kubelet-role: node
20 |       k8s-role: master
21 |       kubelet-bootstrap-role: admin
22 |       calico-role: admin
23 | 
24 | c8-node2:
25 |   host: 192.168.200.182
26 |   user: root
27 |   priv: /root/.ssh/id_rsa
28 |   minion_opts:
29 |     grains:
30 |       worker-role: node
31 |       etcd-role: node
32 |       etcd-name: etcd-node2
33 |       k8s-role: master
34 |       kubelet-role: node
35 | 
36 | c8-node3:
37 |   host: 192.168.200.183
38 |   user: root
39 |   priv: /root/.ssh/id_rsa
40 |   minion_opts:
41 |     grains:
42 |       worker-role: node
43 |       etcd-role: node
44 |       etcd-name: etcd-node3
45 |       k8s-role: master
46 |       kubelet-role: node
47 | 
48 | c8-node4:
49 |   host: 192.168.200.184
50 |   user: root
51 |   priv: /root/.ssh/id_rsa
52 |   minion_opts:
53 |     grains:
54 |       worker-role: node
55 |       kubelet-role: node
--------------------------------------------------------------------------------
/salt/k8s/baseset.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: iokubernetes
4 | # Email: yang-li@live.cn
5 | # Organization: iokubernetes.github.io
6 | # Description: Base System Setup
7 | #******************************************
8 | include:
9 |   - k8s.modules.baseos
10 |   - k8s.modules.nginx
--------------------------------------------------------------------------------
/salt/k8s/etcd.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: iokubernetes
4 | # Email: yang-li@live.cn
5 | # Organization: iokubernetes.github.io
6 | # Description: ETCD Cluster
7 | #******************************************
8 | include:
9 |   - k8s.modules.etcd
--------------------------------------------------------------------------------
/salt/k8s/master.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: skymyyang
4 | # Email: yang-li@live.cn
5 | # Organization: https://www.cnblogs.com/skymyyang/
6 | # Description: Kubernetes Master
7 | #******************************************
8 | include:
9 |   - k8s.modules.api-server
10 |   - k8s.modules.controller-manager
11 |   - k8s.modules.scheduler
12 | 
--------------------------------------------------------------------------------
/salt/k8s/modules/api-server.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: skymyyang
4 | # Email: yang-li@live.cn
5 | # Organization: https://www.cnblogs.com/skymyyang/
6 | # Description: Kubernetes API Server
7 | #******************************************
8 | 
9 | {% set k8s_version = "k8s-v1.18.2" %}
10 | 
11 | # Audit log directory
12 | audit-log-dir:
13 |   file.directory:
14 |     - name: /var/log/kubernetes
15 | # Encryption config file
16 | api-auth-encryption-config:
17 |   file.managed:
18 |     - name: /etc/kubernetes/pki/encryption-config.yaml
19 |     - source: salt://k8s/templates/kube-api-server/encryption-config.yaml.template
20 |     - user: root
21 |     - group: root
22 |     - mode: 644
23 |     - template: jinja
24 |     - defaults:
25 |         ENCRYPTION_KEY: {{ pillar['ENCRYPTION_KEY'] }}
26 | # Audit policy file
27 | kube-apiserver-audit-yaml:
28 |   file.managed:
29 |     - name: /etc/kubernetes/audit-policy.yaml
30 |     - source: salt://k8s/templates/kube-api-server/audit-policy.yml.template
31 |     - user: root
32 |     - group: root
33 |     - mode: 644
34 | 
35 | # Copy the CA certificate and key
36 | ca-pem-pki:
37 |   file.managed:
38 |     - user: root
39 |     - group: root
40 |     - mode: 644
41 |     - name: /etc/kubernetes/pki/ca.pem
42 |     - source: salt://k8s/files/cert/ca.pem
43 | 
44 | 
45 | ca-key-pem-pki:
46 |   file.managed:
47 |     - user: root
48 |     - group: root
49 |     - mode: 644
50 |     - name: /etc/kubernetes/pki/ca-key.pem
51 |     - source: salt://k8s/files/cert/ca-key.pem
52 | 
53 | 
54 | # Copy the apiserver-kubelet-client certificates
55 | kube-apiserver-cert:
56 |   file.managed:
57 |     - user: root
58 |     - group: root
59 |     - mode: 644
60 |     - name: /etc/kubernetes/pki/apiserver-kubelet-client.pem
61 |     - source: salt://k8s/files/cert/apiserver-kubelet-client.pem
62 | 
63 | 
64 | kube-apiserver-cert-key:
65 |   file.managed:
66 |     - user: root
67 |     - group: root
68 |     - mode: 644
69 |     - name: /etc/kubernetes/pki/apiserver-kubelet-client-key.pem
70 |     - source: salt://k8s/files/cert/apiserver-kubelet-client-key.pem
71 | 
72 | # Copy the certificates used by metrics
73 | kubenetes-metrics-cert:
74 |   file.managed:
75 |     - user: root
76 |     - group: root
77 |     - mode: 644
78 |     - name: /etc/kubernetes/pki/front-proxy-client.pem
79 |     - source: salt://k8s/files/cert/front-proxy-client.pem
80 | 
81 | 
82 | kubenetes-metrics-cert-key:
83 |   file.managed:
84 |     - user: root
85 |     - group: root
86 |     - mode: 644
87 |     - name: /etc/kubernetes/pki/front-proxy-client-key.pem
88 |     - source: salt://k8s/files/cert/front-proxy-client-key.pem
89 | 
90 | # Copy the kube-apiserver binary
91 | kube-apiserver-bin:
92 |   file.managed:
93 |     - name: /usr/local/bin/kube-apiserver
94 |     - source: salt://k8s/files/{{ k8s_version }}/bin/kube-apiserver
95 |     - user: root
96 |     - group: root
97 |     - mode: 755
98 |     - template: jinja
99 | 
100 | 
101 | kube-apiserver-service:
102 |   file.managed:
103 |     - name: /usr/lib/systemd/system/kube-apiserver.service
104 |     - source: salt://k8s/templates/kube-api-server/kube-apiserver.service.template
105 |     - user: root
106 |     - group: root
107 |     - mode: 644
108 |     - template: jinja
109 |     - defaults:
110 |         NODE_IP: {{ pillar['NODE_IP'] }}
111 |         SERVICE_CIDR: {{ pillar['SERVICE_CIDR'] }}
112 |         NODE_PORT_RANGE: {{ pillar['NODE_PORT_RANGE'] }}
113 |         ETCD_ENDPOINTS: {{ pillar['ETCD_ENDPOINTS'] }}
114 |   cmd.run:
115 |     - name: systemctl daemon-reload
116 |   service.running:
117 |     - name: kube-apiserver
118 |     - enable: True
119 |     - watch:
120 |       - file: kube-apiserver-service
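A single module state like this one can be applied on its own before running the full highstate; a hedged example against the roster above, where c8-node1 to c8-node3 carry `k8s-role: master`:

```bash
salt-ssh -L 'c8-node1,c8-node2,c8-node3' state.sls k8s.modules.api-server
salt-ssh 'c8-node1' cmd.run 'systemctl is-active kube-apiserver'
```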
--------------------------------------------------------------------------------
/salt/k8s/modules/baseos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: skymyyang
4 | # Email: yang-li@live.cn
5 | # Organization: https://www.cnblogs.com/skymyyang/
6 | # Description: System basic configuration
7 | # Apply the basic system configuration first
8 | #******************************************
9 | 
10 | 
11 | # kubernetes config directory
12 | kubernetes-etc-dir:
13 |   file.directory:
14 |     - name: /etc/kubernetes
15 | 
16 | # certificate directory
17 | kubernetes-pki-dir:
18 |   file.directory:
19 |     - name: /etc/kubernetes/pki
20 | 
21 | # static pod directory
22 | kubernetes-manifests-dir:
23 |   file.directory:
24 |     - name: /etc/kubernetes/manifests
25 | 
26 | 
27 | # Turn off the swap partition
28 | swap-off:
29 |   cmd.run:
30 |     - name: /usr/sbin/swapoff -a && /usr/sbin/sysctl -w vm.swappiness=0 && /usr/bin/sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
31 | 
32 | 
33 | # Disable selinux and firewalld; centos8 manages the network with NetworkManager, so that service must not be disabled
34 | firewalld-off:
35 |   cmd.run:
36 |     - name: /usr/bin/systemctl stop firewalld && /usr/bin/systemctl disable firewalld
37 | 
38 | 
39 | # Install dependency packages
40 | init-pkg:
41 |   pkg.installed:
42 |     - names:
43 |       - nfs-utils
44 |       - socat
45 |       - jq
46 |       - psmisc
47 |       - ipvsadm
48 |       - ipset
49 |       - sysstat
50 |       - libseccomp
51 |       - conntrack-tools
52 |       - net-tools
53 | # Load the kernel modules needed for ipvs forwarding
54 | ipvs-modules-set:
55 |   file.managed:
56 |     - name: /etc/modules-load.d/ipvs.conf
57 |     - source: salt://k8s/templates/baseos/ipvs.conf.template
58 |     - user: root
59 |     - group: root
60 |     - mode: 644
61 |     - template: jinja
62 |   cmd.run:
63 |     - name: /usr/bin/systemctl enable --now systemd-modules-load.service
64 | 
65 | 
66 | # Tune kernel parameters for k8s
67 | sysctl-k8s-conf:
68 |   file.managed:
69 |     - name: /etc/sysctl.d/k8s.conf
70 |     - source: salt://k8s/templates/baseos/k8s.sysctl.conf.template
71 |     - user: root
72 |     - group: root
73 |     - mode: 644
74 |     - template: jinja
75 |   cmd.run:
76 |     - name: /usr/sbin/sysctl --system
77 | 
78 | 
79 | 
80 | # Raise the minimum open-file limits for units started by systemd
81 | system-k8s-conf:
82 |   cmd.run:
83 |     - name: /usr/bin/sed -ri 's/^#(DefaultLimitCORE)=/\1=100000/' /etc/systemd/system.conf && /usr/bin/sed -ri 's/^#(DefaultLimitNOFILE)=/\1=100000/' /etc/systemd/system.conf
84 | 
85 | 
86 | # Maximum number of open files
87 | limits-kubernetes-conf:
88 |   file.managed:
89 |     - name: /etc/security/limits.d/kubernetes.conf
90 |     - source: salt://k8s/templates/baseos/kubernetes.limits.conf.template
91 |     - user: root
92 |     - group: root
93 |     - mode: 644
94 |     - template: jinja
--------------------------------------------------------------------------------
/salt/k8s/modules/calico.sls:
--------------------------------------------------------------------------------
1 | # We use the calico network plugin here
2 | calico-yaml-install:
3 |   file.managed:
4 |     - name: /etc/kubernetes/calico.yaml
5 |     - source: salt://k8s/templates/calico/calico.yaml.template
6 |     - user: root
7 |     - group: root
8 |     - mode: 644
9 |     - template: jinja
10 |     - defaults:
11 |         POD_CIDR: {{ pillar['POD_CIDR'] }}
12 |         VIP_IF: {{ pillar['VIP_IF'] }}
13 |   cmd.run:
14 |     - name: /usr/local/bin/kubectl apply -f /etc/kubernetes/calico.yaml
15 | 
16 | # Deploy the coredns add-on here
17 | coredns-yaml-install:
18 |   file.managed:
19 |     - name: /etc/kubernetes/coredns.yaml
20 |     - source: salt://k8s/templates/calico/coredns.yaml.template
21 |     - user: root
22 |     - group: root
23 |     - mode: 644
24 |     - template: jinja
25 |     - defaults:
26 |         CLUSTER_DNS_DOMAIN: {{ pillar['CLUSTER_DNS_DOMAIN'] }}
27 |         CLUSTER_DNS_SVC_IP: {{ pillar['CLUSTER_DNS_SVC_IP'] }}
28 |   cmd.run:
29 |     - name: /usr/local/bin/kubectl apply -f /etc/kubernetes/coredns.yaml
--------------------------------------------------------------------------------
/salt/k8s/modules/cfssl.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #******************************************
3 | # Author: skymyyang
4 | # Email: yang-li@live.cn
5 | # Organization: https://www.cnblogs.com/skymyyang/
6 | # Description: CfSSL Tools
7 | #******************************************
8 | {% set cfssl_version = "1.4.1" %}
9 | {% set k8s_version = "k8s-v1.18.2" %}
10 | 
11 | 
12 | # Install the cfssl toolset
13 | cfssl-certinfo:
14 |   file.managed:
15 |     - name: /usr/local/bin/cfssl-certinfo
16 |     - source: salt://k8s/files/cfssl/cfssl-certinfo_{{ cfssl_version }}_linux_amd64
17 |     - user: root
18 |     - group: root
19 |     - mode: 755
20 | 
21 | cfssl-json:
22 |   file.managed:
23 |     - name: /usr/local/bin/cfssljson
24 |     - source: salt://k8s/files/cfssl/cfssljson_{{ cfssl_version }}_linux_amd64
25 |     - user: root
26 |     - group: root
27 |     - mode: 755
28 | 
29 | cfssl:
30 |   file.managed:
31 |     - name: /usr/local/bin/cfssl
32 |     - source: salt://k8s/files/cfssl/cfssl_{{ cfssl_version }}_linux_amd64
33 |     - user: root
34 |     - group: root
35 |     - mode: 755
36 | 
37 | # Install kubectl
38 | kubectl-bin:
39 |   file.managed:
40 |     - name: /usr/local/bin/kubectl
41 |     - source: salt://k8s/files/{{ k8s_version }}/bin/kubectl
42 |     - user: root
43 |     - group: root
44 |     - mode: 755
45 | # Install the openssl tools
46 | init-pkg:
47 |   pkg.installed:
48 |     - names:
49 |       - openssl
50 |       - openssl-devel
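A quick sanity check once this state has run, assuming the binaries landed in /usr/local/bin as declared above:

```bash
cfssl version             # should report the 1.4.1 toolset
kubectl version --client  # confirms the copied kubectl runs
```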
/salt/k8s/modules/cni.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: iokubernetes 4 | # Email: yang-li@live.cn 5 | # Organization: iokubernetes.github.io 6 | # Description: CNI For Kubernetes 7 | #****************************************** 8 | {% set cni_version = "cni-plugins-linux-amd64-v0.8.6" %} 9 | 10 | cni-dir-net: 11 | file.directory: 12 | - name: /etc/cni/net.d 13 | - makedirs: True 14 | 15 | 16 | cni-bin-dir: 17 | file.directory: 18 | - name: /opt/cni/bin 19 | - makedirs: True 20 | 21 | 22 | cni-bin: 23 | file.recurse: 24 | - name: /opt/cni/bin 25 | - source: salt://k8s/files/{{ cni_version }}/ 26 | - user: root 27 | - group: root 28 | - file_mode: 755 29 | -------------------------------------------------------------------------------- /salt/k8s/modules/controller-manager.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #******************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Kubernetes Controller Manager 7 | #******************************************** 8 | {% set k8s_version = "k8s-v1.18.2" %} 9 | 10 | 11 | 12 | 13 | 14 | 15 | #Copy the binary 16 | kube-controller-manager-bin: 17 | file.managed: 18 | - name: /usr/local/bin/kube-controller-manager 19 | - source: salt://k8s/files/{{ k8s_version }}/bin/kube-controller-manager 20 | - user: root 21 | - group: root 22 | - mode: 755 23 | 24 | #Copy the kubeconfig file 25 | kube-controller-manager-kubeconfig: 26 | file.managed: 27 | - name: /etc/kubernetes/controller-manager.conf 28 | - source: salt://k8s/files/cert/controller-manager.conf 29 | - user: root 30 | - group: root 31 | - mode: 644 32 | 33 | 34 | kube-controller-manager-service: 35 | file.managed: 36 | - name: /usr/lib/systemd/system/kube-controller-manager.service 37 | - source: salt://k8s/templates/kube-controller-manager/kube-controller-manager.service.template 38 | - user: root 39 | - group: root 40 | - mode: 644 41 | - template: jinja 42 | - defaults: 43 | SERVICE_CIDR: {{ pillar['SERVICE_CIDR'] }} 44 | cmd.run: 45 | - name: systemctl daemon-reload 46 | service.running: 47 | - name: kube-controller-manager 48 | - enable: True 49 | - watch: 50 | - file: kube-controller-manager-service 51 | --------------------------------------------------------------------------------
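Once kube-controller-manager is running, its health can be probed locally (illustrative; 10252 is the component's default insecure status port in v1.18, removed in later releases):

systemctl is-active kube-controller-manager
curl -s http://127.0.0.1:10252/healthz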
/salt/k8s/modules/docker.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Docker Install 7 | #****************************************** 8 | 9 | 10 | #Install Docker 11 | docker-install: 12 | cmd.run: 13 | - name: yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && yum install -y https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/edge/Packages/containerd.io-1.2.13-3.1.el7.x86_64.rpm 14 | pkg.installed: 15 | - name: docker-ce 16 | - version: 3:19.03.9-3.el7 17 | - allow_updates: True 18 | 19 | #Create the Docker configuration directory 20 | docker-config-dir: 21 | file.directory: 22 | - name: /etc/docker 23 | 24 | 25 | #Docker command completion 26 | docker-bash-completion: 27 | pkg.installed: 28 | - name: bash-completion 29 | cmd.run: 30 | - name: /bin/cp /usr/share/bash-completion/completions/docker /etc/bash_completion.d/ 31 | 32 | #Create the daemon configuration file 33 | docker-daemon-config: 34 | file.managed: 35 | - name: /etc/docker/daemon.json 36 | - source: salt://k8s/templates/docker/daemon.json.template 37 | - user: root 38 | - group: root 39 | - mode: 644 40 | 41 | #Service drop-in directory 42 | systemctl-docker-service.d: 43 | file.directory: 44 | - name: /etc/systemd/system/docker.service.d 45 | #Keep Docker's FORWARD DROP policy from breaking pod traffic by adding the parameters below; a blunter alternative is iptables -P FORWARD ACCEPT. See the 10-docker.conf template for the exact settings 46 | docker-service: 47 | file.managed: 48 | - name: /etc/systemd/system/docker.service.d/10-docker.conf 49 | - source: salt://k8s/templates/docker/10-docker.conf.template 50 | - user: root 51 | - group: root 52 | - mode: 755 53 | cmd.run: 54 | - name: systemctl daemon-reload 55 | service.running: 56 | - name: docker 57 | - enable: True 58 | - watch: 59 | - file: docker-daemon-config 60 | 61 | -------------------------------------------------------------------------------- /salt/k8s/modules/etcd.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: ETCD Cluster 7 | #****************************************** 8 | {% set etcd_version = "etcd-v3.4.9-linux-amd64" %} 9 | 10 | 11 | 12 | etcd-bin: 13 | file.managed: 14 | - name: /usr/local/bin/etcd 15 | - source: salt://k8s/files/{{ etcd_version }}/etcd 16 | - user: root 17 | - group: root 18 | - mode: 755 19 | 20 | etcdctl-bin: 21 | file.managed: 22 | - name: /usr/local/bin/etcdctl 23 | - source: salt://k8s/files/{{ etcd_version }}/etcdctl 24 | - user: root 25 | - group: root 26 | - mode: 755 27 | 28 | etcd-dir: 29 | file.directory: 30 | - name: /var/lib/etcd 31 | etcd-wal-dir: 32 | file.directory: 33 | - name: /var/lib/etcd/wal 34 | etcd-config-dir: 35 | file.directory: 36 | - name: /etc/etcd 37 | 38 | 39 | etcd-service: 40 | file.managed: 41 | - name: /usr/lib/systemd/system/etcd.service 42 | - source: salt://k8s/templates/etcd/etcd.service.template 43 | - user: root 44 | - group: root 45 | - mode: 644 46 | - template: jinja 47 | - defaults: 48 | NODE_IP: {{ grains['fqdn_ip4'][0] }} 49 | ETCD_NAME: {{ grains['etcd-name'] }} 50 | ETCD_CLUSTER: {{ pillar['ETCD_CLUSTER'] }} 51 | cmd.run: 52 | - name: systemctl daemon-reload 53 | - watch: 54 | - file: etcd-service 55 | service.running: 56 | - name: etcd 57 | - enable: True 58 | - watch: 59 | - file: etcd-service 60 | --------------------------------------------------------------------------------
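With the etcd units started on all members, cluster health can be checked with the etcdctl binary installed above (illustrative; the plain-HTTP endpoints match the listeners configured in etcd.service.template):

ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 endpoint health
ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 member list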
/salt/k8s/modules/kube-proxy.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Kubernetes Proxy 7 | #****************************************** 8 | 9 | {% set k8s_version = "k8s-v1.18.2" %} 10 | 11 | 12 | kube-proxy-workdir: 13 | file.directory: 14 | - name: /var/lib/kube-proxy 15 | 16 | 17 | #Copy the kube-proxy kubeconfig file 18 | 19 | kube-proxy-kubeconfig: 20 | file.managed: 21 | - user: root 22 | - group: root 23 | - mode: 644 24 | - name: /etc/kubernetes/proxy.config 25 | - source: salt://k8s/files/cert/proxy.config 26 | 27 | kube-proxy-bin: 28 | file.managed: 29 | - name: /usr/local/bin/kube-proxy 30 | - source: salt://k8s/files/{{ k8s_version }}/bin/kube-proxy 31 | - user: root 32 | - group: root 33 | - mode: 755 34 | 35 | kube-proxy-config-yaml: 36 | file.managed: 37 | - name: /etc/kubernetes/kube-proxy.config.yaml 38 | - source: salt://k8s/templates/kube-proxy/kube-proxy.config.yaml.template 39 | - user: root 40 | - group: root 41 | - mode: 644 42 | - template: jinja 43 | - defaults: 44 | CLUSTER_CIDR: {{ pillar['CLUSTER_CIDR'] }} 45 | 46 | 47 | kube-proxy-service: 48 | file.managed: 49 | - name: /usr/lib/systemd/system/kube-proxy.service 50 | - source: salt://k8s/templates/kube-proxy/kube-proxy.service.template 51 | - user: root 52 | - group: root 53 | - mode: 644 54 | - template: jinja 55 | - defaults: 56 | HOST_NAME: {{ pillar['HOST_NAME'] }} 57 | cmd.run: 58 | - name: systemctl daemon-reload 59 | service.running: 60 | - name: kube-proxy 61 | - enable: True 62 | - watch: 63 | - file: kube-proxy-service 64 | --------------------------------------------------------------------------------
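kube-proxy runs in IPVS mode here (mode: "ipvs" in kube-proxy.config.yaml.template further down), so a healthy node should expose virtual servers for the Service network (illustrative):

ipvsadm -Ln
ip addr show kube-ipvs0   # the dummy interface that carries the ClusterIPs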
/salt/k8s/modules/kubectl.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: iokubernetes 4 | # Email: yang-li@live.cn 5 | # Organization: iokubernetes.github.io 6 | # Description: Kubernetes kubectl 7 | #****************************************** 8 | 9 | {% set k8s_version = "k8s-v1.15.4" %} 10 | 11 | 12 | 13 | 14 | kubectl-admin-cluster: 15 | cmd.run: 16 | - name: cd /opt/kubernetes/cfg && /opt/kubernetes/bin/kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server={{ pillar['KUBE_APISERVER'] }} --kubeconfig=kubectl.kubeconfig 17 | 18 | kubectl-admin-credentials: 19 | cmd.run: 20 | - name: cd /opt/kubernetes/cfg && /opt/kubernetes/bin/kubectl config set-credentials admin --client-certificate=/opt/kubernetes/ssl/admin.pem --embed-certs=true --client-key=/opt/kubernetes/ssl/admin-key.pem --kubeconfig=kubectl.kubeconfig 21 | 22 | kubectl-admin-context: 23 | cmd.run: 24 | - name: cd /opt/kubernetes/cfg && /opt/kubernetes/bin/kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kubectl.kubeconfig 25 | 26 | kubectl-admin-use: 27 | cmd.run: 28 | - name: cd /opt/kubernetes/cfg && /opt/kubernetes/bin/kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig && mkdir -p ~/.kube && /bin/cp /opt/kubernetes/cfg/kubectl.kubeconfig ~/.kube/config 29 | -------------------------------------------------------------------------------- /salt/k8s/modules/kubelet-bootstrap-kubeconfig.sls: -------------------------------------------------------------------------------- 1 | #Create the kubelet bootstrap kubeconfig file 2 | #The kubeconfig only carries a token; once bootstrapping finishes, kube-controller-manager issues the kubelet's client and server certificates. 3 | #Creating the token and this kubeconfig requires a reachable api-server, so this runs on the master's admin node. 4 | 5 | kubelet-bootstrap-kubeconfig: 6 | file.managed: 7 | - name: /etc/kubernetes/sslcert/tls-bootstrap-secret-kubeconfig.sh 8 | - source: salt://k8s/templates/ca/tls-bootstrap-secret-kubeconfig.sh.template 9 | - user: root 10 | - group: root 11 | - mode: 755 12 | - template: jinja 13 | - defaults: 14 | KUBE_APISERVER: {{ pillar["KUBE_APISERVER"] }} 15 | TOKEN_ID: {{ pillar["TOKEN_ID"] }} 16 | TOKEN_SECRET: {{ pillar["TOKEN_SECRET"] }} 17 | BOOTSTRAP_TOKEN: {{ pillar["BOOTSTRAP_TOKEN"] }} 18 | cmd.run: 19 | - name: /bin/bash /etc/kubernetes/sslcert/tls-bootstrap-secret-kubeconfig.sh 20 | - unless: test -f /etc/kubernetes/sslcert/bootstrap.kubeconfig 21 | 22 | kubelet-bootstrap-kubeconfig-cp: 23 | file.copy: 24 | - user: root 25 | - group: root 26 | - mode: 644 27 | - name: /srv/salt/k8s/files/cert/bootstrap-kubelet.conf 28 | - source: /etc/kubernetes/sslcert/bootstrap.kubeconfig 29 | - force: True 30 | 31 | 32 | #On k8s-m1, create the TLS bootstrap auto-approve RBAC rules so CSRs are handled automatically 33 | kubelet-bootstrap-rbac: 34 | file.managed: 35 | - name: /etc/kubernetes/csr-crb.yaml 36 | - source: salt://k8s/templates/kube-api-server/csr-crb.yaml 37 | - user: root 38 | - group: root 39 | - mode: 644 40 | - template: jinja 41 | cmd.run: 42 | - name: /usr/local/bin/kubectl create -f /etc/kubernetes/csr-crb.yaml 43 | 44 | #Grant kube-apiserver access to the kubelet API 45 | #When running kubectl exec, run, logs and similar commands, the apiserver forwards the request to the kubelet's HTTPS port. 46 | #The RBAC rules below authorize the certificate the apiserver uses (kubernetes.pem) 47 | #-- i.e. the user with CN "kubernetes" -- to access the kubelet API 48 | apiserver-to-kubelet-rbac: 49 | file.managed: 50 | - name: /etc/kubernetes/apiserver-to-kubelet-rbac.yaml 51 | - source: salt://k8s/templates/kube-api-server/apiserver-to-kubelet-rbac.yml.template 52 | - user: root 53 | - group: root 54 | - mode: 644 55 | - template: jinja 56 | cmd.run: 57 | - name: /usr/local/bin/kubectl create -f /etc/kubernetes/apiserver-to-kubelet-rbac.yaml 58 | 59 | #The certificate referenced by --authentication-kubeconfig and --authorization-kubeconfig needs permission to create "subjectaccessreviews" 60 | # kube-controller-manager-clusterrole: 61 | # cmd.run: 62 | # - name: /opt/kubernetes/bin/kubectl create clusterrolebinding controller-manager:system:auth-delegator --user system:kube-controller-manager --clusterrole system:auth-delegator --------------------------------------------------------------------------------
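To verify the bootstrap objects these states create (illustrative, run with the admin kubeconfig):

kubectl -n kube-system get secrets | grep bootstrap-token
kubectl get clusterrolebinding kubelet-bootstrap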
/salt/k8s/modules/kubelet.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Kubernetes Node kubelet 7 | #****************************************** 8 | 9 | {% set k8s_version = "k8s-v1.18.2" %} 10 | 11 | include: 12 | - k8s.modules.cni 13 | - k8s.modules.docker 14 | 15 | kubelet-workdir: 16 | file.directory: 17 | - name: /var/lib/kubelet 18 | - mode: 755 19 | kubelet-service-d: 20 | file.directory: 21 | - name: /usr/lib/systemd/system/kubelet.service.d 22 | - mode: 755 23 | 24 | 25 | #Install the kubelet bootstrap kubeconfig file 26 | kubelet-bootstrap-kubeconfig: 27 | file.managed: 28 | - name: /etc/kubernetes/bootstrap-kubelet.conf 29 | - source: salt://k8s/files/cert/bootstrap-kubelet.conf 30 | - user: root 31 | - group: root 32 | - mode: 644 33 | 34 | #Copy the CA certificate 35 | ca-pem-pki: 36 | file.managed: 37 | - user: root 38 | - group: root 39 | - mode: 644 40 | - name: /etc/kubernetes/pki/ca.pem 41 | - source: salt://k8s/files/cert/ca.pem 42 | - replace: False 43 | 44 | 45 | ca-key-pem-pki: 46 | file.managed: 47 | - user: root 48 | - group: root 49 | - mode: 644 50 | - name: /etc/kubernetes/pki/ca-key.pem 51 | - source: salt://k8s/files/cert/ca-key.pem 52 | - replace: False 53 | 54 | kubelet-bin: 55 | file.managed: 56 | - name: /usr/local/bin/kubelet 57 | - source: salt://k8s/files/{{ k8s_version }}/bin/kubelet 58 | - user: root 59 | - group: root 60 | - mode: 755 61 | kubelet-kubeadm-conf: 62 | file.managed: 63 | - name: /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf 64 | - source: salt://k8s/templates/kubelet/10-kubeadm.conf.template 65 | - user: root 66 | - group: root 67 | - mode: 755 68 | - template: jinja 69 | - defaults: 70 | HOST_NAME: {{ pillar['HOST_NAME'] }} 71 | kubelet-config-yaml: 72 | file.managed: 73 | - name: /var/lib/kubelet/config.yaml 74 | - source: salt://k8s/templates/kubelet/kubelet-conf.yml.template 75 | - user: root 76 | - group: root 77 | - mode: 644 78 | - template: jinja 79 | - defaults: 80 | CLUSTER_DNS_SVC_IP: {{ pillar['CLUSTER_DNS_SVC_IP'] }} 81 | CLUSTER_DNS_DOMAIN: {{ pillar['CLUSTER_DNS_DOMAIN'] }} 82 | kubelet-service: 83 | file.managed: 84 | - name: /usr/lib/systemd/system/kubelet.service 85 | - source: salt://k8s/templates/kubelet/kubelet.service.template 86 | - user: root 87 | - group: root 88 | - mode: 644 89 | cmd.run: 90 | - name: systemctl daemon-reload 91 | service.running: 92 | - name: kubelet 93 | - enable: True 94 | - watch: 95 | - file: kubelet-kubeadm-conf 96 | -------------------------------------------------------------------------------- /salt/k8s/modules/nginx.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Nginx Install 7 | #****************************************** 8 | #Use the nginx stable release 9 | {% set nginx_version = "nginx-1.18.0" %} 10 | 11 | 12 | nginx-dir: 13 | file.directory: 14 | - name: /usr/local/kube-nginx 15 | nginx-install: 16 | pkg.installed: 17 | - names: 18 | - gcc 19 | - gcc-c++ 20 | - make 21 | file.managed: 22 | - name: /usr/local/src/{{ nginx_version }}.tar.gz 23 | - source: salt://k8s/files/{{ nginx_version }}/{{ nginx_version }}.tar.gz 24 | - user: root 25 | - group: root 26 | - mode: 644 27 | - template: jinja 28 | cmd.run: 29 | - name: cd /usr/local/src && tar -zxvf /usr/local/src/{{ nginx_version }}.tar.gz && cd /usr/local/src/{{ nginx_version }} && ./configure --with-stream --without-http --prefix=/usr/local/kube-nginx --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module && make && make install 30 | - unless: test -f /usr/local/kube-nginx/sbin/nginx 31 | nginx-config: 32 | file.managed: 33 | - name: /usr/local/kube-nginx/conf/kube-nginx.conf 34 | - source: salt://k8s/templates/nginx/kube-nginx.conf.template 35 | - user: root 36 | - group: root 37 | - mode: 644 38 | - template: jinja 39 | - defaults: 40 | MASTER_H1: {{ pillar['MASTER_H1'] }} 41 | MASTER_H2: {{ pillar['MASTER_H2'] }} 42 | MASTER_H3: {{ pillar['MASTER_H3'] }} 43 | nginx-service: 44 | file.managed: 45 | - name: /usr/lib/systemd/system/kube-nginx.service 46 | - source: salt://k8s/templates/nginx/kube-nginx.service.template 47 | - user: root 48 | - group: root 49 | - mode: 644 50 | - template: jinja 51 | service.running: 52 | - name: kube-nginx 53 | - enable: True 54 | - watch: 55 | - file: nginx-config 56 | --------------------------------------------------------------------------------
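The kube-nginx instance built above is a local TCP load balancer in front of the three apiservers (see kube-nginx.conf.template below); a quick check (illustrative):

systemctl is-active kube-nginx
ss -lnt | grep 8443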
/salt/k8s/modules/scheduler.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Kubernetes Scheduler 7 | #****************************************** 8 | 9 | {% set k8s_version = "k8s-v1.18.2" %} 10 | 11 | 12 | 13 | kube-scheduler-bin: 14 | file.managed: 15 | - name: /usr/local/bin/kube-scheduler 16 | - source: salt://k8s/files/{{ k8s_version }}/bin/kube-scheduler 17 | - user: root 18 | - group: root 19 | - mode: 755 20 | 21 | 22 | #Copy the kube-scheduler kubeconfig file 23 | kube-scheduler-kubeconfig: 24 | file.managed: 25 | - name: /etc/kubernetes/scheduler.conf 26 | - source: salt://k8s/files/cert/scheduler.conf 27 | - user: root 28 | - group: root 29 | - mode: 755 30 | 31 | kube-scheduler-service: 32 | file.managed: 33 | - name: /usr/lib/systemd/system/kube-scheduler.service 34 | - source: salt://k8s/templates/kube-scheduler/kube-scheduler.service.template 35 | - user: root 36 | - group: root 37 | - mode: 644 38 | - template: jinja 39 | cmd.run: 40 | - name: systemctl daemon-reload 41 | - watch: 42 | - file: kube-scheduler-service 43 | service.running: 44 | - name: kube-scheduler 45 | - enable: True 46 | - watch: 47 | - file: kube-scheduler-service 48 | -------------------------------------------------------------------------------- /salt/k8s/node.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: Kubernetes Node 7 | #****************************************** 8 | 9 | include: 10 | - k8s.modules.kubelet 11 | - k8s.modules.kube-proxy 12 | 13 | #kubectl-csr: 14 | # cmd.run: 15 | # - name: /opt/kubernetes/bin/kubectl get csr | grep 'Pending' | awk 'NR>0{print $1}'| xargs /opt/kubernetes/bin/kubectl certificate approve 16 | # - onlyif: /opt/kubernetes/bin/kubectl get csr | grep 'Pending' 17 | -------------------------------------------------------------------------------- /salt/k8s/templates/baseos/99-prophet.conf.template: -------------------------------------------------------------------------------- 1 | [Journal] 2 | # Persist journals to disk 3 | Storage=persistent 4 | 5 | # Compress archived journals 6 | Compress=yes 7 | 8 | SyncIntervalSec=5m 9 | RateLimitInterval=30s 10 | RateLimitBurst=1000 11 | 12 | # Maximum total disk space for journals 13 | SystemMaxUse=5G 14 | 15 | # Maximum size per journal file: 200M 16 | SystemMaxFileSize=200M 17 | 18 | # Retain logs for 2 weeks 19 | MaxRetentionSec=2week 20 | 21 | # Do not forward logs to syslog 22 | ForwardToSyslog=no 23 | -------------------------------------------------------------------------------- /salt/k8s/templates/baseos/ipvs.conf.template: -------------------------------------------------------------------------------- 1 | ip_vs 2 | ip_vs_rr 3 | ip_vs_wrr 4 | ip_vs_sh 5 | nf_conntrack 6 | br_netfilter 7 | -------------------------------------------------------------------------------- /salt/k8s/templates/baseos/k8s.sysctl.conf.template: -------------------------------------------------------------------------------- 1 | # https://github.com/moby/moby/issues/31208 2 | # ipvsadm -l --timeout 3 | # Fix long-lived connection timeouts in IPVS mode; any value below 900 works 4 | net.ipv4.tcp_keepalive_time = 600 5 | net.ipv4.tcp_keepalive_intvl = 30 6 | net.ipv4.tcp_keepalive_probes = 10 7 | #Disable IPv6 8 | net.ipv6.conf.all.disable_ipv6 = 1 9 | net.ipv6.conf.default.disable_ipv6 = 1 10 | net.ipv6.conf.lo.disable_ipv6 = 1 11 | net.ipv4.neigh.default.gc_stale_time = 120 12 | net.ipv4.conf.all.rp_filter = 0 13 | net.ipv4.conf.default.rp_filter = 0 14 | net.ipv4.conf.default.arp_announce = 2 15 | net.ipv4.conf.lo.arp_announce = 2 16 | net.ipv4.conf.all.arp_announce = 2 17 | net.ipv4.ip_forward = 1 18 | net.ipv4.tcp_max_tw_buckets = 5000 19 | net.ipv4.tcp_syncookies = 1 20 | net.ipv4.tcp_max_syn_backlog = 1024 21 | net.ipv4.tcp_synack_retries = 2 22 | # Make bridged traffic visible to iptables 23 
| net.bridge.bridge-nf-call-ip6tables = 1 24 | net.bridge.bridge-nf-call-iptables = 1 25 | net.bridge.bridge-nf-call-arptables = 1 26 | net.netfilter.nf_conntrack_max = 2310720 27 | fs.inotify.max_user_watches=89100 28 | fs.may_detach_mounts = 1 29 | fs.file-max = 52706963 30 | fs.nr_open = 52706963 31 | vm.overcommit_memory=1 32 | vm.panic_on_oom=0 33 | -------------------------------------------------------------------------------- /salt/k8s/templates/baseos/kubernetes.limits.conf.template: -------------------------------------------------------------------------------- 1 | * soft nproc 131072 2 | * hard nproc 131072 3 | * soft nofile 131072 4 | * hard nofile 131072 5 | root soft nproc 131072 6 | root hard nproc 131072 7 | root soft nofile 131072 8 | root hard nofile 131072 -------------------------------------------------------------------------------- /salt/k8s/templates/ca/admin-csr.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes-admin", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "system:masters", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | 19 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "876000h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth" 13 | ], 14 | "expiry": "876000h" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "CN", 10 | "ST": "BeiJing", 11 | "L": "BeiJing", 12 | "O": "k8s", 13 | "OU": "System" 14 | } 15 | ], 16 | "ca": { 17 | "expiry": "876000h" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/front-proxy-client-csr.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "front-proxy-client", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "k8s", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/kube-controller-manager-csr.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-controller-manager", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "hosts": [ 8 | "127.0.0.1", 9 | "{{ KUBE_APISERVER_DNS_NAME }}", 10 | "{{ MASTER_IP_M1 }}", 11 | "{{ MASTER_IP_M2 }}", 12 | "{{ MASTER_IP_M3 }}", 13 | "{{ MASTER_H1 }}", 14 | "{{ MASTER_H2 }}", 15 | "{{ MASTER_H3 }}" 16 | ], 17 | "names": [ 18 | { 19 | "C": "CN", 20 | "ST": "BeiJing", 21 | "L": "BeiJing", 22 | "O": "system:kube-controller-manager", 23 | "OU": "System" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/kube-proxy-csr.json.template: 
-------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-proxy", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "k8s", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | 19 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/kube-scheduler-csr.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-scheduler", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "{{ KUBE_APISERVER_DNS_NAME }}", 6 | "{{ MASTER_IP_M1 }}", 7 | "{{ MASTER_IP_M2 }}", 8 | "{{ MASTER_IP_M3 }}", 9 | "{{ MASTER_H1 }}", 10 | "{{ MASTER_H2 }}", 11 | "{{ MASTER_H3 }}" 12 | ], 13 | "key": { 14 | "algo": "rsa", 15 | "size": 2048 16 | }, 17 | "names": [ 18 | { 19 | "C": "CN", 20 | "ST": "BeiJing", 21 | "L": "BeiJing", 22 | "O": "system:kube-scheduler", 23 | "OU": "System" 24 | } 25 | ] 26 | } -------------------------------------------------------------------------------- /salt/k8s/templates/ca/kubernetes-csr.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "{{ MASTER_IP_M1 }}", 6 | "{{ MASTER_IP_M2 }}", 7 | "{{ MASTER_IP_M3 }}", 8 | "{{ MASTER_H1 }}", 9 | "{{ MASTER_H2 }}", 10 | "{{ MASTER_H3 }}", 11 | "{{ KUBE_APISERVER_DNS_NAME }}", 12 | "{{ CLUSTER_KUBERNETES_SVC_IP }}", 13 | "kubernetes", 14 | "kubernetes.default", 15 | "kubernetes.default.svc", 16 | "kubernetes.default.svc.cluster", 17 | "kubernetes.default.svc.cluster.local" 18 | ], 19 | "key": { 20 | "algo": "rsa", 21 | "size": 2048 22 | }, 23 | "names": [ 24 | { 25 | "C": "CN", 26 | "ST": "BeiJing", 27 | "L": "BeiJing", 28 | "O": "k8s", 29 | "OU": "System" 30 | } 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /salt/k8s/templates/ca/tls-bootstrap-secret-kubeconfig.sh.template: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Set up the kubelet bootstrap token and kubeconfig 3 | TOKEN_PUB={{ TOKEN_ID }} 4 | TOKEN_SECRET={{ TOKEN_SECRET }} 5 | BOOTSTRAP_TOKEN={{ BOOTSTRAP_TOKEN }} 6 | 7 | kubectl -n kube-system create secret generic bootstrap-token-${TOKEN_PUB} \ 8 | --type 'bootstrap.kubernetes.io/token' \ 9 | --from-literal description="cluster bootstrap token" \ 10 | --from-literal token-id=${TOKEN_PUB} \ 11 | --from-literal token-secret=${TOKEN_SECRET} \ 12 | --from-literal usage-bootstrap-authentication=true \ 13 | --from-literal usage-bootstrap-signing=true 14 | 15 | 16 | 17 | CLUSTER_NAME="kubernetes" 18 | KUBE_USER="kubelet-bootstrap" 19 | KUBE_CONFIG="bootstrap.kubeconfig" 20 | 21 | # Set cluster parameters 22 | /usr/local/bin/kubectl config set-cluster ${CLUSTER_NAME} \ 23 | --certificate-authority=/etc/kubernetes/sslcert/ca.pem \ 24 | --embed-certs=true \ 25 | --server={{ KUBE_APISERVER }} \ 26 | --kubeconfig=/etc/kubernetes/sslcert/${KUBE_CONFIG} 27 | 28 | # Set context parameters 29 | /usr/local/bin/kubectl config set-context ${KUBE_USER}@${CLUSTER_NAME} \ 30 | --cluster=${CLUSTER_NAME} \ 31 | --user=${KUBE_USER} \ 32 | --kubeconfig=/etc/kubernetes/sslcert/${KUBE_CONFIG} 33 | 34 | # Set client credentials 35 | /usr/local/bin/kubectl config set-credentials ${KUBE_USER} \ 36 | --token=${BOOTSTRAP_TOKEN} \ 37 | --kubeconfig=/etc/kubernetes/sslcert/${KUBE_CONFIG} 38 | 39 | 40 | # Switch to the new context 41 | /usr/local/bin/kubectl config use-context
${KUBE_USER}@${CLUSTER_NAME} --kubeconfig=/etc/kubernetes/sslcert/${KUBE_CONFIG} 42 | 43 | 44 | #Bootstrap token auth and permissions 45 | #Authorize kubelets to create CSRs 46 | #Reference: https://github.com/opsnull/follow-me-install-kubernetes-cluster/blob/master/06-4.kubelet.md 47 | /usr/local/bin/kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers -------------------------------------------------------------------------------- /salt/k8s/templates/cni/10-flannel.conflist.template: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cbr0", 3 | "plugins": [ 4 | { 5 | "type": "flannel", 6 | "delegate": { 7 | "hairpinMode": true, 8 | "isDefaultGateway": true 9 | } 10 | }, 11 | { 12 | "type": "portmap", 13 | "capabilities": { 14 | "portMappings": true 15 | } 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /salt/k8s/templates/docker/10-docker.conf.template: -------------------------------------------------------------------------------- 1 | [Service] 2 | ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT 3 | ExecStopPost=/bin/bash -c '/sbin/iptables -D FORWARD -s 0.0.0.0/0 -j ACCEPT &> /dev/null || :' -------------------------------------------------------------------------------- /salt/k8s/templates/docker/daemon.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "exec-opts": ["native.cgroupdriver=systemd"], 3 | "log-driver": "json-file", 4 | "log-opts": { 5 | "max-size": "100m", 6 | "max-file": "10" 7 | }, 8 | "bip": "169.254.123.1/24", 9 | "oom-score-adjust": -1000, 10 | "registry-mirrors": ["https://dx5z2hy7.mirror.aliyuncs.com"], 11 | "storage-driver": "overlay2", 12 | "storage-opts":["overlay2.override_kernel_check=true"] 13 | } -------------------------------------------------------------------------------- /salt/k8s/templates/etcd/etcd.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Etcd Service 3 | Documentation=https://coreos.com/etcd/docs/latest/ 4 | After=NetworkManager.target 5 | 6 | [Service] 7 | Type=notify 8 | ExecStart=/usr/local/bin/etcd \ 9 | --auto-compaction-retention '1h' --max-request-bytes '33554432' --auto-compaction-mode 'periodic' --quota-backend-bytes '8589934592' \ 10 | --heartbeat-interval=250 \ 11 | --election-timeout=2000 \ 12 | --data-dir=/var/lib/etcd \ 13 | --wal-dir=/var/lib/etcd/wal \ 14 | --name={{ ETCD_NAME }} \ 15 | --listen-client-urls http://{{ NODE_IP }}:2379,http://127.0.0.1:2379 \ 16 | --listen-peer-urls http://{{ NODE_IP }}:2380 \ 17 | --initial-advertise-peer-urls http://{{ NODE_IP }}:2380 \ 18 | --advertise-client-urls http://{{ NODE_IP }}:2379 \ 19 | --initial-cluster-token etcd-k8s-cluster \ 20 | --initial-cluster "{{ ETCD_CLUSTER }}" \ 21 | --initial-cluster-state new 22 | Restart=on-failure 23 | RestartSec=10 24 | LimitNOFILE=65536 25 | 26 | [Install] 27 | WantedBy=multi-user.target 28 | Alias=etcd3.service 29 | -------------------------------------------------------------------------------- /salt/k8s/templates/kube-api-server/apiserver-to-kubelet-rbac.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: 5 | rbac.authorization.kubernetes.io/autoupdate: "true" 6 | labels: 7 | kubernetes.io/bootstrapping: rbac-defaults 8 
| name: system:kube-apiserver-to-kubelet 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - nodes/proxy 14 | - nodes/stats 15 | - nodes/log 16 | - nodes/spec 17 | - nodes/metrics 18 | verbs: 19 | - "*" 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: ClusterRoleBinding 23 | metadata: 24 | name: system:kube-apiserver 25 | namespace: "" 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: system:kube-apiserver-to-kubelet 30 | subjects: 31 | - apiGroup: rbac.authorization.k8s.io 32 | kind: User 33 | name: kubernetes -------------------------------------------------------------------------------- /salt/k8s/templates/kube-api-server/bootstrap-token-secret.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: bootstrap-token-{{ TOKEN_ID }} 5 | namespace: kube-system 6 | type: bootstrap.kubernetes.io/token 7 | stringData: 8 | token-id: {{ TOKEN_ID }} 9 | token-secret: {{ TOKEN_SECRET }} 10 | usage-bootstrap-authentication: "true" 11 | usage-bootstrap-signing: "true" 12 | auth-extra-groups: system:bootstrappers:default-node-token -------------------------------------------------------------------------------- /salt/k8s/templates/kube-api-server/csr-crb.yaml: -------------------------------------------------------------------------------- 1 | # Approve all CSRs for the group "system:bootstrappers" 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: auto-approve-csrs-for-group 6 | subjects: 7 | - kind: Group 8 | name: system:bootstrappers 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: ClusterRole 12 | name: system:certificates.k8s.io:certificatesigningrequests:nodeclient 13 | apiGroup: rbac.authorization.k8s.io 14 | --- 15 | # To let a node of the group "system:nodes" renew its own credentials 16 | kind: ClusterRoleBinding 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | metadata: 19 | name: node-client-cert-renewal 20 | subjects: 21 | - kind: Group 22 | name: system:nodes 23 | apiGroup: rbac.authorization.k8s.io 24 | roleRef: 25 | kind: ClusterRole 26 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 27 | apiGroup: rbac.authorization.k8s.io 28 | --- 29 | # A ClusterRole which instructs the CSR approver to approve a node requesting a 30 | # serving cert matching its client cert. 
31 | kind: ClusterRole 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | metadata: 34 | name: approve-node-server-renewal-csr 35 | rules: 36 | - apiGroups: ["certificates.k8s.io"] 37 | resources: ["certificatesigningrequests/selfnodeserver"] 38 | verbs: ["create"] 39 | --- 40 | # To let a node of the group "system:nodes" renew its own server credentials 41 | kind: ClusterRoleBinding 42 | apiVersion: rbac.authorization.k8s.io/v1 43 | metadata: 44 | name: node-server-cert-renewal 45 | subjects: 46 | - kind: Group 47 | name: system:nodes 48 | apiGroup: rbac.authorization.k8s.io 49 | roleRef: 50 | kind: ClusterRole 51 | name: approve-node-server-renewal-csr 52 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /salt/k8s/templates/kube-api-server/encryption-config.yaml.template: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | - name: key1 10 | secret: {{ ENCRYPTION_KEY }} 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /salt/k8s/templates/kube-api-server/kube-apiserver.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=NetworkManager.target 5 | 6 | [Service] 7 | ExecStart=/usr/local/bin/kube-apiserver \ 8 | --advertise-address={{ NODE_IP }} \ 9 | --allow-privileged=true \ 10 | --authorization-mode=Node,RBAC \ 11 | --client-ca-file=/etc/kubernetes/pki/ca.pem \ 12 | --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodPreset \ 13 | --enable-bootstrap-token-auth=true \ 14 | --enable-aggregator-routing=true \ 15 | --etcd-servers={{ ETCD_ENDPOINTS }} \ 16 | --insecure-port=0 \ 17 | --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.pem \ 18 | --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client-key.pem \ 19 | --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \ 20 | --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \ 21 | --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \ 22 | --requestheader-client-ca-file=/etc/kubernetes/pki/ca.pem \ 23 | --requestheader-allowed-names=front-proxy-client \ 24 | --requestheader-extra-headers-prefix=X-Remote-Extra- \ 25 | --requestheader-group-headers=X-Remote-Group \ 26 | --requestheader-username-headers=X-Remote-User \ 27 | --secure-port=6443 \ 28 | --service-account-key-file=/etc/kubernetes/pki/ca-key.pem \ 29 | --service-cluster-ip-range={{ SERVICE_CIDR }} \ 30 | --tls-cert-file=/etc/kubernetes/pki/apiserver-kubelet-client.pem \ 31 | --tls-private-key-file=/etc/kubernetes/pki/apiserver-kubelet-client-key.pem \ 32 | --service-node-port-range={{ NODE_PORT_RANGE }} \ 33 | --max-mutating-requests-inflight=2000 \ 34 | --max-requests-inflight=4000 \ 35 | --audit-log-maxage=7 \ 36 | --audit-log-maxbackup=10 \ 37 | --audit-log-maxsize=100 \ 38 | --audit-log-path=/var/log/kubernetes/kube-apiserver-audit.log \ 39 | --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ 40 | --runtime-config=api/all=true 41 | Restart=on-failure 42 | 
RestartSec=10 43 | Type=notify 44 | LimitNOFILE=65536 45 | 46 | [Install] 47 | WantedBy=multi-user.target 48 | -------------------------------------------------------------------------------- /salt/k8s/templates/kube-api-server/kubelet-bootstrap-rbac.yml.template: -------------------------------------------------------------------------------- 1 | #Approve all CSRs from the system:bootstrappers group 2 | # Approve all CSRs for the group "system:bootstrappers" 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: kubelet-bootstrap 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: system:node-bootstrapper 11 | subjects: 12 | - apiGroup: rbac.authorization.k8s.io 13 | kind: Group 14 | name: system:bootstrappers:default-node-token 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRoleBinding 18 | metadata: 19 | name: node-autoapprove-bootstrap 20 | roleRef: 21 | apiGroup: rbac.authorization.k8s.io 22 | kind: ClusterRole 23 | name: system:certificates.k8s.io:certificatesigningrequests:nodeclient 24 | subjects: 25 | - apiGroup: rbac.authorization.k8s.io 26 | kind: Group 27 | name: system:bootstrappers:default-node-token 28 | --- 29 | # Approve renewal CSRs for the group "system:nodes" 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: ClusterRoleBinding 32 | metadata: 33 | name: node-autoapprove-certificate-rotation 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 38 | subjects: 39 | - apiGroup: rbac.authorization.k8s.io 40 | kind: Group 41 | name: system:nodes 42 | -------------------------------------------------------------------------------- /salt/k8s/templates/kube-controller-manager/kube-controller-manager.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-controller-manager \ 7 | --profiling=true \ 8 | --cluster-name=kubernetes \ 9 | --controllers=*,bootstrapsigner,tokencleaner \ 10 | --kube-api-qps=1000 \ 11 | --kube-api-burst=2000 \ 12 | --leader-elect=true \ 13 | --use-service-account-credentials=true \ 14 | --concurrent-service-syncs=2 \ 15 | --bind-address=0.0.0.0 \ 16 | --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf \ 17 | --kubeconfig=/etc/kubernetes/controller-manager.conf \ 18 | --client-ca-file=/etc/kubernetes/pki/ca.pem \ 19 | --requestheader-client-ca-file=/etc/kubernetes/pki/ca.pem \ 20 | --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf \ 21 | --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \ 22 | --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \ 23 | --experimental-cluster-signing-duration=876000h \ 24 | --horizontal-pod-autoscaler-sync-period=10s \ 25 | --concurrent-deployment-syncs=10 \ 26 | --concurrent-gc-syncs=30 \ 27 | --node-cidr-mask-size=24 \ 28 | --service-cluster-ip-range={{ SERVICE_CIDR }} \ 29 | --pod-eviction-timeout=10s \ 30 | --terminated-pod-gc-threshold=10000 \ 31 | --root-ca-file=/etc/kubernetes/pki/ca.pem \ 32 | --service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem \ 33 | --node-monitor-grace-period=10s \ 34 | --node-monitor-period=3s \ 35 | --node-startup-grace-period=20s 36 | Restart=on-failure 37 | RestartSec=5 38 | 39 | [Install] 40 | WantedBy=multi-user.target 41 | 
-------------------------------------------------------------------------------- /salt/k8s/templates/kube-proxy/kube-proxy.config.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 2 | bindAddress: 0.0.0.0 3 | clientConnection: 4 | acceptContentTypes: "" 5 | burst: 10 6 | contentType: application/vnd.kubernetes.protobuf 7 | kubeconfig: /etc/kubernetes/proxy.config 8 | qps: 5 9 | clusterCIDR: {{ CLUSTER_CIDR }} 10 | configSyncPeriod: 15m0s 11 | conntrack: 12 | max: null 13 | maxPerCore: 32768 14 | min: 131072 15 | tcpCloseWaitTimeout: 1h0m0s 16 | tcpEstablishedTimeout: 24h0m0s 17 | enableProfiling: false 18 | healthzBindAddress: 0.0.0.0:10256 19 | iptables: 20 | masqueradeAll: true 21 | masqueradeBit: 14 22 | minSyncPeriod: 0s 23 | syncPeriod: 30s 24 | ipvs: 25 | excludeCIDRs: null 26 | minSyncPeriod: 0s 27 | scheduler: "" 28 | syncPeriod: 30s 29 | kind: KubeProxyConfiguration 30 | metricsBindAddress: 127.0.0.1:10249 31 | mode: "ipvs" 32 | nodePortAddresses: null 33 | oomScoreAdj: -999 34 | portRange: "" 35 | resourceContainer: /kube-proxy 36 | udpIdleTimeout: 250ms 37 | -------------------------------------------------------------------------------- /salt/k8s/templates/kube-proxy/kube-proxy.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kube-Proxy Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=NetworkManager.target 5 | [Service] 6 | WorkingDirectory=/var/lib/kube-proxy 7 | ExecStart=/usr/local/bin/kube-proxy \ 8 | --config=/etc/kubernetes/kube-proxy.config.yaml \ 9 | --hostname-override={{ HOST_NAME }} 10 | 11 | Restart=on-failure 12 | RestartSec=5 13 | LimitNOFILE=65536 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /salt/k8s/templates/kube-scheduler/kube-scheduler.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-scheduler \ 7 | --leader-elect=true \ 8 | --bind-address=127.0.0.1 \ 9 | --kubeconfig=/etc/kubernetes/scheduler.conf 10 | Restart=always 11 | RestartSec=5 12 | StartLimitInterval=0 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /salt/k8s/templates/kubelet/10-kubeadm.conf.template: -------------------------------------------------------------------------------- 1 | # Note: This dropin only works with kubeadm and kubelet v1.11+ 2 | [Service] 3 | Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" 4 | Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" 5 | Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 6 | Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 --hostname-override={{ HOST_NAME }}" 7 | # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically 8 | EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env 9 | # This is a file that the user 
can use for overrides of the kubelet args as a last resort. Preferably, the user should use 10 | # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. 11 | EnvironmentFile=-/etc/sysconfig/kubelet 12 | ExecStart= 13 | ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_NETWORK_ARGS $KUBELET_SYSTEM_PODS_ARGS 14 | -------------------------------------------------------------------------------- /salt/k8s/templates/kubelet/kubelet-conf.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: kubelet.config.k8s.io/v1beta1 2 | kind: KubeletConfiguration 3 | address: 0.0.0.0 4 | port: 10250 5 | readOnlyPort: 10255 6 | authentication: 7 | anonymous: 8 | enabled: false 9 | webhook: 10 | cacheTTL: 2m0s 11 | enabled: true 12 | x509: 13 | clientCAFile: /etc/kubernetes/pki/ca.pem 14 | authorization: 15 | mode: Webhook 16 | webhook: 17 | cacheAuthorizedTTL: 5m0s 18 | cacheUnauthorizedTTL: 30s 19 | cgroupDriver: systemd 20 | cgroupsPerQOS: true 21 | clusterDNS: 22 | - {{ CLUSTER_DNS_SVC_IP }} 23 | clusterDomain: {{ CLUSTER_DNS_DOMAIN }} 24 | containerLogMaxFiles: 5 25 | containerLogMaxSize: 10Mi 26 | contentType: application/vnd.kubernetes.protobuf 27 | cpuCFSQuota: true 28 | cpuManagerPolicy: none 29 | cpuManagerReconcilePeriod: 10s 30 | enableControllerAttachDetach: true 31 | enableDebuggingHandlers: true 32 | enforceNodeAllocatable: 33 | - pods 34 | eventBurst: 10 35 | eventRecordQPS: 5 36 | evictionHard: 37 | imagefs.available: 15% 38 | memory.available: 100Mi 39 | nodefs.available: 10% 40 | nodefs.inodesFree: 5% 41 | evictionPressureTransitionPeriod: 5m0s 42 | failSwapOn: true 43 | featureGates: 44 | CSIMigration: false 45 | fileCheckFrequency: 20s 46 | hairpinMode: promiscuous-bridge 47 | healthzBindAddress: 127.0.0.1 48 | healthzPort: 10248 49 | httpCheckFrequency: 20s 50 | imageGCHighThresholdPercent: 85 51 | imageGCLowThresholdPercent: 80 52 | imageMinimumGCAge: 2m0s 53 | iptablesDropBit: 15 54 | iptablesMasqueradeBit: 14 55 | kubeAPIBurst: 10 56 | kubeAPIQPS: 5 57 | makeIPTablesUtilChains: true 58 | maxOpenFiles: 1000000 59 | maxPods: 110 60 | nodeStatusUpdateFrequency: 10s 61 | oomScoreAdj: -999 62 | podPidsLimit: -1 63 | registryBurst: 10 64 | registryPullQPS: 5 65 | resolvConf: /etc/resolv.conf 66 | rotateCertificates: true 67 | runtimeRequestTimeout: 2m0s 68 | serializeImagePulls: true 69 | staticPodPath: /etc/kubernetes/manifests 70 | streamingConnectionIdleTimeout: 4h0m0s 71 | syncFrequency: 1m0s 72 | volumeStatsAggPeriod: 1m0s 73 | -------------------------------------------------------------------------------- /salt/k8s/templates/kubelet/kubelet.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubelet: The Kubernetes Node Agent 3 | Documentation=https://kubernetes.io/docs/ 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kubelet 7 | Restart=always 8 | StartLimitInterval=0 9 | RestartSec=10 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /salt/k8s/templates/nginx/kube-nginx.conf.template: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | 3 | events { 4 | worker_connections 20240; 5 | use epoll; 6 | } 7 | error_log /usr/local/kube-nginx/nginx_error.log 
info; 8 | stream { 9 | upstream kube-servers { 10 | hash $remote_addr consistent; 11 | server {{ MASTER_H1 }}:6443 max_fails=3 fail_timeout=30s; 12 | server {{ MASTER_H2 }}:6443 max_fails=3 fail_timeout=30s; 13 | server {{ MASTER_H3 }}:6443 max_fails=3 fail_timeout=30s; 14 | } 15 | 16 | server { 17 | listen 8443 reuseport; 18 | proxy_connect_timeout 3s; 19 | proxy_timeout 3000s; 20 | proxy_pass kube-servers; 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /salt/k8s/templates/nginx/kube-nginx.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kube-apiserver nginx proxy 3 | After=NetworkManager.target 4 | 5 | [Service] 6 | Type=forking 7 | ExecStartPre=/usr/local/kube-nginx/sbin/nginx -c /usr/local/kube-nginx/conf/kube-nginx.conf -p /usr/local/kube-nginx -t 8 | ExecStart=/usr/local/kube-nginx/sbin/nginx -c /usr/local/kube-nginx/conf/kube-nginx.conf -p /usr/local/kube-nginx 9 | ExecReload=/usr/local/kube-nginx/sbin/nginx -c /usr/local/kube-nginx/conf/kube-nginx.conf -p /usr/local/kube-nginx -s reload 10 | PrivateTmp=true 11 | Restart=always 12 | RestartSec=5 13 | StartLimitInterval=0 14 | LimitNOFILE=65536 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /salt/top.sls: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #****************************************** 3 | # Author: skymyyang 4 | # Email: yang-li@live.cn 5 | # Organization: https://www.cnblogs.com/skymyyang/ 6 | # Description: SaltStack Top File 7 | #****************************************** 8 | 9 | base: 10 | 'worker-role:node': 11 | - match: grain 12 | - k8s.baseset 13 | 'etcd-role:node': 14 | - match: grain 15 | - k8s.etcd 16 | 'ca-file-role:admin': 17 | - match: grain 18 | - k8s.modules.ca-file-generate 19 | 'k8s-role:master': 20 | - match: grain 21 | - k8s.master 22 | 'kubelet-bootstrap-role:admin': 23 | - match: grain 24 | - k8s.modules.kubelet-bootstrap-kubeconfig 25 | 'kubelet-role:node': 26 | - match: grain 27 | - k8s.node 28 | 'calico-role:admin': 29 | - match: grain 30 | - k8s.modules.calico 31 | --------------------------------------------------------------------------------
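With the top file above, the cluster converges through grain-targeted highstates, roughly in this order (illustrative salt commands; the grains such as etcd-role and k8s-role must already be assigned to the minions):

salt -G 'etcd-role:node' state.highstate
salt -G 'k8s-role:master' state.highstate
salt -G 'kubelet-role:node' state.highstate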