├── .gitignore
├── LICENSE
├── README.md
├── docker_registry
│   ├── README.md
│   ├── create_docker_registry.sh
│   ├── create_docker_registry_compose.sh
│   ├── docker-compose.yaml
│   ├── test_docker_registry.sh
│   └── use_aliyun_registry_mirror_and_private_registry.sh
├── k8s_dashboard
│   ├── 01_create_k8s_dashboard.sh
│   ├── 02_create_sample_user.sh
│   ├── 03_generate_user_cert.sh
│   ├── 04_install_heapster.sh
│   ├── README.md
│   ├── dashboard_cluster_role_binding_admin.yaml
│   ├── dashboard_service_account_admin.yaml
│   ├── deploy_k8s_dashboard.sh
│   ├── use_aliyun_heapster_images.sh
│   └── use_aliyun_k8s_dashboard_images.sh
├── kubeadm
│   ├── 01_pre_check_and_configure.sh
│   ├── 02_install_docker.sh
│   ├── 03_install_kubernetes.sh
│   ├── 04_pull_kubernetes_images_from_aliyun.sh
│   ├── 04_pull_kubernetes_node_images_from_aliyun.sh
│   ├── 05_kubeadm_init.sh
│   ├── 06_install_flannel.sh
│   ├── CentOS7-Aliyun.repo
│   ├── configure_cgroup.sh
│   ├── kubeadm_init_master.sh
│   ├── kubeadm_join_node.sh
│   ├── use_aliyun_docker_registry.sh
│   ├── use_aliyun_kubernetes_yum_source.sh
│   └── use_aliyun_yum_source.sh
├── kubeadm_v1.10.3
│   ├── 01_pre_check_and_configure.sh
│   ├── 02_install_docker.sh
│   ├── 03_install_kubernetes.sh
│   ├── 04_pull_kubernetes_images_from_aliyun.sh
│   ├── 04_pull_kubernetes_node_images_from_aliyun.sh
│   ├── 05_kubeadm_init.sh
│   ├── 06_install_flannel.sh
│   ├── CentOS7-Aliyun.repo
│   ├── README.md
│   ├── configure_cgroup.sh
│   ├── kubeadm_init_master.sh
│   ├── kubeadm_join_node.sh
│   ├── use_aliyun_docker_registry.sh
│   ├── use_aliyun_kubernetes_yum_source.sh
│   └── use_aliyun_yum_source.sh
├── kubeadm_v1.11.0
│   ├── 01_pre_check_and_configure.sh
│   ├── 02_install_docker.sh
│   ├── 03_install_kubernetes.sh
│   ├── 04_pull_kubernetes_images_from_aliyun.sh
│   ├── 04_pull_kubernetes_node_images_from_aliyun.sh
│   ├── 05_kubeadm_init.sh
│   ├── 06_install_flannel.sh
│   ├── CentOS7-Aliyun.repo
│   ├── README.md
│   ├── configure_cgroup.sh
│   ├── kubeadm_init_master.sh
│   ├── kubeadm_join_node.sh
│   ├── use_aliyun_docker_registry.sh
│   ├── use_aliyun_kubernetes_yum_source.sh
│   └── use_aliyun_yum_source.sh
├── kubeadm_v1.13.0
│   ├── 01_pre_check_and_configure.sh
│   ├── 02_install_docker.sh
│   ├── 03_install_kubernetes.sh
│   ├── 04_pull_kubernetes_images_from_aliyun.sh
│   ├── 04_pull_kubernetes_node_images_from_aliyun.sh
│   ├── 05_kubeadm_init.sh
│   ├── 06_install_flannel.sh
│   ├── CentOS7-Aliyun.repo
│   ├── README.md
│   ├── configure_cgroup.sh
│   ├── epel-7-Aliyun.repo
│   ├── k8s_health_check.sh
│   ├── kubeadm_init_master.sh
│   ├── kubeadm_join_node.sh
│   ├── pull_flannel_images_from_aliyun.sh
│   ├── use_aliyun_docker_registry.sh
│   ├── use_aliyun_kubernetes_yum_source.sh
│   └── use_aliyun_yum_source.sh
├── kubeadm_v1.19.3
│   ├── 01_pre_check_and_configure.sh
│   ├── 02_install_docker.sh
│   ├── 03_install_kubernetes.sh
│   ├── 04_pull_calico_images.sh
│   ├── 04_pull_kubernetes_images_from_aliyun.sh
│   ├── 04_pull_kubernetes_node_images_from_aliyun.sh
│   ├── 05_kubeadm_init.sh
│   ├── 06_install_calico.sh
│   ├── 07_install_metrics_server.sh
│   ├── 08_install_prometheus_grafana.sh
│   ├── aliyun
│   │   ├── repo
│   │   │   ├── CentOS7-Aliyun.repo
│   │   │   └── epel-7-Aliyun.repo
│   │   ├── use_aliyun_docker_registry.sh
│   │   ├── use_aliyun_kubernetes_yum_source.sh
│   │   └── use_aliyun_yum_source.sh
│   ├── calico
│   │   ├── calico-ens33.yaml
│   │   ├── calico-eth-ens.yaml
│   │   ├── calico-eth0.yaml
│   │   └── calico.yaml
│   ├── enable_kubectl_master.sh
│   ├── enable_kubectl_worker.sh
│   ├── haproxy
│   │   └── haproxy.template.cfg
│   ├── k8s_health_check.sh
│   ├── keepalived
│   │   ├── check_apiserver.sh
│   │   ├── keepalived.bakcup.template.cfg
│   │   └── keepalived.master.template.cfg
│   ├── kubeadm_init_master.sh
│   ├── kubeadm_join_node.sh
│   ├── metrics-server
│   │   ├── metrics-server-insecure-hostnetwork.yaml
│   │   └── metrics-server-insecure.yaml
│   ├── prometheus_grafana
│   │   └── manifests
│   │       ├── alertmanager-alertmanager.yaml
│   │       ├── alertmanager-secret.yaml
│   │       ├── alertmanager-service.yaml
│   │       ├── alertmanager-serviceAccount.yaml
│   │       ├── alertmanager-serviceMonitor.yaml
│   │       ├── grafana-dashboardDatasources.yaml
│   │       ├── grafana-dashboardDefinitions.yaml
│   │       ├── grafana-dashboardSources.yaml
│   │       ├── grafana-deployment.yaml
│   │       ├── grafana-service.yaml
│   │       ├── grafana-serviceAccount.yaml
│   │       ├── grafana-serviceMonitor.yaml
│   │       ├── kube-state-metrics-clusterRole.yaml
│   │       ├── kube-state-metrics-clusterRoleBinding.yaml
│   │       ├── kube-state-metrics-deployment.yaml
│   │       ├── kube-state-metrics-service.yaml
│   │       ├── kube-state-metrics-serviceAccount.yaml
│   │       ├── kube-state-metrics-serviceMonitor.yaml
│   │       ├── node-exporter-clusterRole.yaml
│   │       ├── node-exporter-clusterRoleBinding.yaml
│   │       ├── node-exporter-daemonset.yaml
│   │       ├── node-exporter-service.yaml
│   │       ├── node-exporter-serviceAccount.yaml
│   │       ├── node-exporter-serviceMonitor.yaml
│   │       ├── prometheus-adapter-apiService.yaml
│   │       ├── prometheus-adapter-clusterRole.yaml
│   │       ├── prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
│   │       ├── prometheus-adapter-clusterRoleBinding.yaml
│   │       ├── prometheus-adapter-clusterRoleBindingDelegator.yaml
│   │       ├── prometheus-adapter-clusterRoleServerResources.yaml
│   │       ├── prometheus-adapter-configMap.yaml
│   │       ├── prometheus-adapter-deployment.yaml
│   │       ├── prometheus-adapter-roleBindingAuthReader.yaml
│   │       ├── prometheus-adapter-service.yaml
│   │       ├── prometheus-adapter-serviceAccount.yaml
│   │       ├── prometheus-adapter-serviceMonitor.yaml
│   │       ├── prometheus-clusterRole.yaml
│   │       ├── prometheus-clusterRoleBinding.yaml
│   │       ├── prometheus-operator-serviceMonitor.yaml
│   │       ├── prometheus-prometheus.yaml
│   │       ├── prometheus-roleBindingConfig.yaml
│   │       ├── prometheus-roleBindingSpecificNamespaces.yaml
│   │       ├── prometheus-roleConfig.yaml
│   │       ├── prometheus-roleSpecificNamespaces.yaml
│   │       ├── prometheus-rules.yaml
│   │       ├── prometheus-service.yaml
│   │       ├── prometheus-serviceAccount.yaml
│   │       ├── prometheus-serviceMonitor.yaml
│   │       ├── prometheus-serviceMonitorApiserver.yaml
│   │       ├── prometheus-serviceMonitorCoreDNS.yaml
│   │       ├── prometheus-serviceMonitorKubeControllerManager.yaml
│   │       ├── prometheus-serviceMonitorKubeScheduler.yaml
│   │       ├── prometheus-serviceMonitorKubelet.yaml
│   │       └── setup
│   │           ├── 0namespace-namespace.yaml
│   │           ├── prometheus-operator-0alertmanagerConfigCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0podmonitorCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0probeCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0prometheusCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-0thanosrulerCustomResourceDefinition.yaml
│   │           ├── prometheus-operator-clusterRole.yaml
│   │           ├── prometheus-operator-clusterRoleBinding.yaml
│   │           ├── prometheus-operator-deployment.yaml
│   │           ├── prometheus-operator-service.yaml
│   │           └── prometheus-operator-serviceAccount.yaml
│   └── proxy
│       └── docker
│           ├── http-proxy.conf.bak
│           ├── unuse_docker_proxy.sh
│           └── use_docker_proxy.sh
└── os
    ├── init_virtual_machine.sh
    └── use_centos_yum.sh

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.idea/
*.iml

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 cookcodeblog

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# One-click deployment of a Kubernetes cluster with kubeadm

## Deploy Kubernetes v1.10.3

See [kubeadm_v1.10.3](https://github.com/cookcodeblog/k8s-deploy/tree/master/kubeadm_v1.10.3)

## Deploy Kubernetes v1.11.0

See [kubeadm_v1.11.0](https://github.com/cookcodeblog/k8s-deploy/tree/master/kubeadm_v1.11.0)

## Deploy Kubernetes v1.13.0

See [kubeadm_v1.13.0](https://github.com/cookcodeblog/k8s-deploy/tree/master/kubeadm_v1.13.0)

## Deploy Kubernetes v1.19.3

See [kubeadm_v1.19.3](https://github.com/cookcodeblog/k8s-deploy/tree/master/kubeadm_v1.19.3)

## Detailed documentation

For detailed documentation, see my blog posts:

* [One-click deployment of a Kubernetes cluster with kubeadm](https://blog.csdn.net/nklinsirui/article/details/80602724)
* [Install a highly available multi-master Kubernetes cluster on CentOS7 with kubeadm](https://cookcode.blog.csdn.net/article/details/109265060)

--------------------------------------------------------------------------------
/docker_registry/README.md:
--------------------------------------------------------------------------------
[TOC]

# One-click setup of a Docker private registry

## Overview

The `docker_registry` directory provides two ways to set up a Docker private registry with a single command.

Features:

* One-click installation, quick and convenient
* Automatically adds the `insecure-registries` option to the Docker daemon configuration
* Uses the Aliyun registry mirror, so pulling official images stays fast afterwards
* The setup scripts can safely be run repeatedly

Limitations:

* Only HTTP is supported for now; HTTPS is not supported yet.
* Pick one of the two methods below and do not mix them, otherwise you will have to stop and remove the `registry` container manually.

> First run `chmod u+x *.sh` in the docker_registry directory to make the scripts executable, then run one of the setup scripts below.
>
> After setup, run `./test_docker_registry.sh` to verify the registry works.

## Docker run

Run `./create_docker_registry.sh`.

## Docker Compose

Run `./create_docker_registry_compose.sh`.
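Once the registry is up (by either method), it can also be smoke-tested directly over the Docker Registry v2 HTTP API, independently of the Docker CLI. A minimal sketch, assuming the registry address 192.168.37.100:5000 used throughout these scripts:

    # List the repositories the registry currently holds (empty right after setup)
    curl http://192.168.37.100:5000/v2/_catalog

    # List tags for one repository, e.g. after test_docker_registry.sh has pushed busybox
    curl http://192.168.37.100:5000/v2/busybox/tags/list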
--------------------------------------------------------------------------------
/docker_registry/create_docker_registry.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Stop and remove any running registry container, then run registry:2
if [[ $(docker ps | grep -c registry) -gt 0 ]]; then
    docker stop registry
    docker rm registry
fi
docker run -d -p 5000:5000 -v /var/lib/docker-registry:/var/lib/registry -e REGISTRY_STORAGE_DELETE_ENABLED="true" --restart=always --name registry registry:2

./use_aliyun_registry_mirror_and_private_registry.sh

--------------------------------------------------------------------------------
/docker_registry/create_docker_registry_compose.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Tear down any running registry container, then bring it up with docker-compose
if [[ $(docker ps | grep -c registry) -gt 0 ]]; then
    docker-compose down
fi
docker-compose up -d

./use_aliyun_registry_mirror_and_private_registry.sh

--------------------------------------------------------------------------------
/docker_registry/docker-compose.yaml:
--------------------------------------------------------------------------------
registry:
  restart: always
  image: registry:2
  container_name: registry
  environment:
    REGISTRY_STORAGE_DELETE_ENABLED: "true"
  ports:
    - 5000:5000
  volumes:
    - /var/lib/docker-registry:/var/lib/registry

--------------------------------------------------------------------------------
/docker_registry/test_docker_registry.sh:
--------------------------------------------------------------------------------
#!/bin/bash

docker pull busybox
docker images | grep busybox
docker tag busybox 192.168.37.100:5000/busybox
docker images | grep busybox
docker push 192.168.37.100:5000/busybox
docker rmi busybox
docker rmi 192.168.37.100:5000/busybox
docker images | grep busybox
docker pull 192.168.37.100:5000/busybox
docker images | grep busybox
docker rmi 192.168.37.100:5000/busybox
docker images | grep busybox
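Because the registry is started with REGISTRY_STORAGE_DELETE_ENABLED="true", images pushed by the test above can also be deleted again through the Registry v2 API. A sketch relying on the registry:2 image defaults (the Docker-Content-Digest response header and the config file at /etc/docker/registry/config.yml):

    # Look up the manifest digest for busybox:latest
    digest=$(curl -sI -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \
      http://192.168.37.100:5000/v2/busybox/manifests/latest | \
      awk '/Docker-Content-Digest/ {print $2}' | tr -d '\r')

    # Delete the manifest, then reclaim blob storage inside the container
    curl -X DELETE http://192.168.37.100:5000/v2/busybox/manifests/$digest
    docker exec registry bin/registry garbage-collect /etc/docker/registry/config.yml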
--------------------------------------------------------------------------------
/docker_registry/use_aliyun_registry_mirror_and_private_registry.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Use the Aliyun docker registry mirror and set insecure-registries
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.37.100:5000"]
}
EOF

# Restart docker
systemctl daemon-reload
systemctl restart docker

--------------------------------------------------------------------------------
/k8s_dashboard/01_create_k8s_dashboard.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Use Aliyun k8s dashboard images
./use_aliyun_k8s_dashboard_images.sh

# Deploy the kubernetes dashboard
kubectl apply -f kubernetes-dashboard.yaml

# Check pod status
kubectl get pods --namespace=kube-system | grep kubernetes-dashboard

# Check pod details
kubectl describe pods kubernetes-dashboard --namespace=kube-system

--------------------------------------------------------------------------------
/k8s_dashboard/02_create_sample_user.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Create Service Account
kubectl apply -f dashboard_service_account_admin.yaml

# Create Cluster Role Binding
kubectl apply -f dashboard_cluster_role_binding_admin.yaml

# Get Service Account Token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
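If only the raw bearer token is needed (for pasting into the dashboard login page) rather than the full describe output, a jsonpath query against the same secret works as well:

    # Print just the decoded token of the admin-user service account
    kubectl -n kube-system get secret \
      $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') \
      -o jsonpath='{.data.token}' | base64 -d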
"Please paste above generated Service Account Token to login" 18 | 19 | 20 | # Install Heapster 21 | ./04_install_heapster.sh 22 | 23 | -------------------------------------------------------------------------------- /k8s_dashboard/use_aliyun_heapster_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Download yaml files 6 | 7 | wget -O grafana.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/grafana.yaml 8 | wget -O heapster.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml 9 | wget -O influxdb.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml 10 | 11 | 12 | 13 | cp -p grafana.yaml grafana.yaml.bak$(date '+%Y%m%d%H%M%S') 14 | # Set port ype as "NodePort" for test environment 15 | sed -i "s/\# type: NodePort/type: NodePort/g" grafana.yaml 16 | # Set only use API server proxy to access grafana 17 | 18 | sed -i "s/value: \//\#value: \//g" grafana.yaml 19 | sed -i "s/\# \#value: \/api/value: \/api/g" grafana.yaml 20 | # Change heapster-grafana-amd64 version from v5.0.4 to v4.4.3, beacuse in gcr.io the latest version is v4.4.3 21 | sed -i "s/v5\.0\.4/v4\.4\.3/g" grafana.yaml 22 | # Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog 23 | sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" grafana.yaml 24 | 25 | 26 | cp -p heapster.yaml heapster.yaml.bak$(date '+%Y%m%d%H%M%S') 27 | # Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog 28 | sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" heapster.yaml 29 | 30 | 31 | 32 | 33 | cp -p influxdb.yaml influxdb.yaml.bak$(date '+%Y%m%d%H%M%S') 34 | # Change heapster-influxdb-amd64 version from v1.5.2 to v1.3.3, beacuse in gcr.io the latest version is v1.3.3 35 | sed -i "s/v1\.5\.2/v1\.3\.3/g" influxdb.yaml 36 | # Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog 37 | sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" influxdb.yaml 38 | 39 | 40 | 41 | wget -O heapster-rbac.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /k8s_dashboard/use_aliyun_k8s_dashboard_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | wget -O kubernetes-dashboard.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.8.3/src/deploy/recommended/kubernetes-dashboard.yaml 6 | cp -p kubernetes-dashboard.yaml kubernetes-dashboard.yaml.bak$(date '+%Y%m%d%H%M%S') 7 | 8 | # Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog 9 | sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" kubernetes-dashboard.yaml 10 | 11 | 12 | -------------------------------------------------------------------------------- /kubeadm/01_pre_check_and_configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "###############################################" 6 | echo "Please ensure your OS is CentOS7 64 bits" 7 | echo "Please ensure your machine has full network connection and internet access" 8 | echo "Please ensure run this script with root user" 
--------------------------------------------------------------------------------
/k8s_dashboard/04_install_heapster.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Use Aliyun Heapster images
./use_aliyun_heapster_images.sh

# Create K8S resources
kubectl apply -f grafana.yaml
kubectl apply -f heapster.yaml
kubectl apply -f influxdb.yaml

kubectl apply -f heapster-rbac.yaml

# Check Pod status
kubectl get pods -n kube-system

# Check cluster info
kubectl cluster-info

--------------------------------------------------------------------------------
/k8s_dashboard/README.md:
--------------------------------------------------------------------------------
# One-click deployment of Kubernetes Dashboard

For detailed documentation, see my blog post:

[One-click deployment of Kubernetes Dashboard v1.8.3](https://blog.csdn.net/nklinsirui/article/details/80806131)

--------------------------------------------------------------------------------
/k8s_dashboard/dashboard_cluster_role_binding_admin.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

--------------------------------------------------------------------------------
/k8s_dashboard/dashboard_service_account_admin.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

--------------------------------------------------------------------------------
/k8s_dashboard/deploy_k8s_dashboard.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Create kubernetes dashboard
./01_create_k8s_dashboard.sh

# Create sample user
./02_create_sample_user.sh

# Generate user certificate
./03_generate_user_cert.sh

# Prompt to login
echo "Please login to the K8S dashboard:"
echo "https://your_master_ip:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
echo "Please paste the Service Account Token generated above to log in"

# Install Heapster
./04_install_heapster.sh

--------------------------------------------------------------------------------
/k8s_dashboard/use_aliyun_heapster_images.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Download yaml files
wget -O grafana.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/grafana.yaml
wget -O heapster.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml
wget -O influxdb.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml

cp -p grafana.yaml grafana.yaml.bak$(date '+%Y%m%d%H%M%S')
# Set port type to "NodePort" for test environments
sed -i "s/\# type: NodePort/type: NodePort/g" grafana.yaml
# Only allow access to Grafana through the API server proxy
sed -i "s/value: \//\#value: \//g" grafana.yaml
sed -i "s/\# \#value: \/api/value: \/api/g" grafana.yaml
# Change heapster-grafana-amd64 version from v5.0.4 to v4.4.3, because the latest version mirrored from gcr.io is v4.4.3
sed -i "s/v5\.0\.4/v4\.4\.3/g" grafana.yaml
# Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog
sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" grafana.yaml

cp -p heapster.yaml heapster.yaml.bak$(date '+%Y%m%d%H%M%S')
# Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog
sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" heapster.yaml

cp -p influxdb.yaml influxdb.yaml.bak$(date '+%Y%m%d%H%M%S')
# Change heapster-influxdb-amd64 version from v1.5.2 to v1.3.3, because the latest version mirrored from gcr.io is v1.3.3
sed -i "s/v1\.5\.2/v1\.3\.3/g" influxdb.yaml
# Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog
sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" influxdb.yaml

wget -O heapster-rbac.yaml https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml
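A quick way to confirm the sed substitutions above took effect is to list the image references that will actually be deployed:

    # Every image should now point at the Aliyun mirror namespace
    grep -n "image:" grafana.yaml heapster.yaml influxdb.yaml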
--------------------------------------------------------------------------------
/k8s_dashboard/use_aliyun_k8s_dashboard_images.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

wget -O kubernetes-dashboard.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.8.3/src/deploy/recommended/kubernetes-dashboard.yaml
cp -p kubernetes-dashboard.yaml kubernetes-dashboard.yaml.bak$(date '+%Y%m%d%H%M%S')

# Replace k8s.gcr.io image with registry.cn-shenzhen.aliyuncs.com/cookcodeblog
sed -i "s/k8s\.gcr\.io/registry\.cn-shenzhen\.aliyuncs\.com\/cookcodeblog/g" kubernetes-dashboard.yaml

--------------------------------------------------------------------------------
/kubeadm/01_pre_check_and_configure.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

echo "###############################################"
echo "Please ensure your OS is CentOS7 64 bits"
echo "Please ensure your machine has full network connection and internet access"
echo "Please ensure you run this script as the root user"

# Check hostname, Mac addr and product_uuid
echo "###############################################"
echo "Please check hostname as below:"
uname -a

echo "###############################################"
echo "Please check Mac addr and product_uuid as below:"
ip link
sudo cat /sys/class/dmi/id/product_uuid

# Stop firewalld
echo "###############################################"
echo "Stop firewalld"
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux
echo "###############################################"
echo "Disable SELinux"
setenforce 0
cp -p /etc/selinux/config /etc/selinux/config.bak$(date '+%Y%m%d%H%M%S')
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

# Turn off Swap
echo "###############################################"
echo "Turn off Swap"
swapoff -a
cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S')
sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab
sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab
mount -a
free -m
cat /proc/swaps

# Setup iptables (routing)
echo "###############################################"
echo "Setup iptables (routing)"
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
EOF

sysctl --system

# Use Aliyun Yum source
echo "###############################################"
echo "Use Aliyun Yum source"
./use_aliyun_yum_source.sh
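The bridge sysctls written above only exist once the br_netfilter kernel module is loaded, so on a fresh CentOS7 host it is worth verifying them before continuing; a small check:

    # Load br_netfilter if it is missing, then confirm the bridge settings are 1
    lsmod | grep br_netfilter || modprobe br_netfilter
    sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables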
--------------------------------------------------------------------------------
/kubeadm/02_install_docker.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Remove any previously installed docker packages
sudo yum remove docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-selinux \
    docker-engine-selinux \
    docker-engine

# Set up repository
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# Use Aliyun Docker repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install docker
# On a new system with the yum repo defined, force the older version and ignore obsoletes introduced by 17.06.0
yum install -y --setopt=obsoletes=0 \
    docker-ce-17.03.2.ce-1.el7.centos.x86_64 \
    docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch

systemctl enable docker
systemctl start docker

docker version

# Use Aliyun docker registry
./use_aliyun_docker_registry.sh

--------------------------------------------------------------------------------
/kubeadm/03_install_kubernetes.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

./use_aliyun_kubernetes_yum_source.sh

setenforce 0
yum install -y kubelet-1.10.3 kubeadm-1.10.3 kubectl-1.10.3
systemctl enable kubelet && systemctl start kubelet

# Configure cgroup matched with Docker
./configure_cgroup.sh
systemctl daemon-reload
systemctl restart kubelet

# Set pause-amd64 image for kubelet service
cat <<EOF > /etc/systemd/system/kubelet.service.d/20-pod-infra-image.conf
[Service]
Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image=registry.cn-shenzhen.aliyuncs.com/cookcodeblog/pause-amd64:3.1"
EOF

--------------------------------------------------------------------------------
/kubeadm/05_kubeadm_init.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Reset first if kubeadm init has been run before
kubeadm reset

# Use Aliyun images for the control plane
export KUBE_REPO_PREFIX="registry.cn-shenzhen.aliyuncs.com/cookcodeblog"
export KUBE_ETCD_IMAGE="registry.cn-shenzhen.aliyuncs.com/cookcodeblog/etcd-amd64:3.1.12"

# kubeadm init with flannel network
kubeadm init --kubernetes-version=v1.10.3 --pod-network-cidr=10.244.0.0/16

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S')
echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile
source $HOME/.bash_profile

--------------------------------------------------------------------------------
/kubeadm/06_install_flannel.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# Wait a while to let the network take effect
sleep 10
kubectl get pods --all-namespaces

# Check component status
kubectl get cs

# Check pods status in case any pods are not in running status
kubectl get pods --all-namespaces

--------------------------------------------------------------------------------
/kubeadm/CentOS7-Aliyun.repo:
--------------------------------------------------------------------------------
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client. You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the
# remarked out baseurl= line instead.
#
#

[base]
name=CentOS-7 - Base - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/os/$basearch/
        http://mirrors.aliyuncs.com/centos/7/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=os
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#released updates
[updates]
name=CentOS-7 - Updates - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/updates/$basearch/
        http://mirrors.aliyuncs.com/centos/7/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras]
name=CentOS-7 - Extras - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/extras/$basearch/
        http://mirrors.aliyuncs.com/centos/7/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-7 - Plus - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/centosplus/$basearch/
        http://mirrors.aliyuncs.com/centos/7/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#contrib - packages by Centos Users
[contrib]
name=CentOS-7 - Contrib - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/contrib/$basearch/
        http://mirrors.aliyuncs.com/centos/7/contrib/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=contrib
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

--------------------------------------------------------------------------------
/kubeadm/configure_cgroup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

--------------------------------------------------------------------------------
/kubeadm/kubeadm_init_master.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Pre-configure
./01_pre_check_and_configure.sh

# Install Docker
./02_install_docker.sh

# Install kubelet kubeadm kubectl
./03_install_kubernetes.sh

# Pull kubernetes images
./04_pull_kubernetes_images_from_aliyun.sh

# Initialize k8s master
./05_kubeadm_init.sh

# Install flannel Pod network
./06_install_flannel.sh

--------------------------------------------------------------------------------
/kubeadm/kubeadm_join_node.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Pre-configure
./01_pre_check_and_configure.sh

# Install Docker
./02_install_docker.sh

# Install kubelet kubeadm kubectl
./03_install_kubernetes.sh

# Pull kubernetes node images
./04_pull_kubernetes_node_images_from_aliyun.sh

# Join kubernetes node
export KUBE_REPO_PREFIX="registry.cn-shenzhen.aliyuncs.com/cookcodeblog"
export KUBE_ETCD_IMAGE="registry.cn-shenzhen.aliyuncs.com/cookcodeblog/etcd-amd64:3.1.12"

# Put "kubeadm join" here from "kubeadm init" output
# Example: kubeadm join 192.168.37.101:6443 --token mmxy0q.sjqca7zrzzj7czft --discovery-token-ca-cert-hash sha256:099421bf9b3c58e4e041e816ba6477477474614a17eca7f5d240eb733e7476bb
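If the "kubeadm join" command printed by "kubeadm init" has been lost, it can be regenerated on the master instead of copied from old output; a one-line sketch (the --print-join-command flag is available on recent kubeadm releases):

    # Run on the master: prints a ready-to-paste "kubeadm join ..." command
    kubeadm token create --print-join-command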
--------------------------------------------------------------------------------
/kubeadm/use_aliyun_docker_registry.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
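Whether the mirror is actually in effect can be read back from the running daemon:

    # Should list https://5twf62k1.mirror.aliyuncs.com/ under "Registry Mirrors"
    docker info | grep -A 1 "Registry Mirrors"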
--------------------------------------------------------------------------------
/kubeadm/use_aliyun_kubernetes_yum_source.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all
yum makecache -y
yum repolist all

--------------------------------------------------------------------------------
/kubeadm/use_aliyun_yum_source.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

#wget -O /etc/yum.repos.d/CentOS7-Aliyun.repo http://mirrors.aliyun.com/repo/Centos-7.repo
cp -p ./CentOS7-Aliyun.repo /etc/yum.repos.d
yum clean all
yum makecache -y
yum repolist all

yum install wget -y

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/01_pre_check_and_configure.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

echo "###############################################"
echo "Please ensure your OS is CentOS7 64 bits"
echo "Please ensure your machine has full network connection and internet access"
echo "Please ensure you run this script as the root user"

# Check hostname, Mac addr and product_uuid
echo "###############################################"
echo "Please check hostname as below:"
uname -a

echo "###############################################"
echo "Please check Mac addr and product_uuid as below:"
ip link
sudo cat /sys/class/dmi/id/product_uuid

# Stop firewalld
echo "###############################################"
echo "Stop firewalld"
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux
echo "###############################################"
echo "Disable SELinux"
setenforce 0
cp -p /etc/selinux/config /etc/selinux/config.bak$(date '+%Y%m%d%H%M%S')
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

# Turn off Swap
echo "###############################################"
echo "Turn off Swap"
swapoff -a
cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S')
sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab
sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab
mount -a
free -m
cat /proc/swaps

# Setup iptables (routing)
echo "###############################################"
echo "Setup iptables (routing)"
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
EOF

sysctl --system

# Use Aliyun Yum source
echo "###############################################"
echo "Use Aliyun Yum source"
./use_aliyun_yum_source.sh

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/02_install_docker.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Remove any previously installed docker packages
sudo yum remove docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-selinux \
    docker-engine-selinux \
    docker-engine

# Set up repository
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# Use Aliyun Docker repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install docker
# On a new system with the yum repo defined, force the older version and ignore obsoletes introduced by 17.06.0
yum install -y --setopt=obsoletes=0 \
    docker-ce-17.03.2.ce-1.el7.centos.x86_64 \
    docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch

systemctl enable docker
systemctl start docker

docker version

# Use Aliyun docker registry
./use_aliyun_docker_registry.sh

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/03_install_kubernetes.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

./use_aliyun_kubernetes_yum_source.sh

setenforce 0
yum install -y kubelet-1.10.3 kubeadm-1.10.3 kubectl-1.10.3
systemctl enable kubelet && systemctl start kubelet

# Configure cgroup matched with Docker
./configure_cgroup.sh
systemctl daemon-reload
systemctl restart kubelet

# Use Kubernetes default pause image

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/04_pull_kubernetes_images_from_aliyun.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Check the version list in https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
# Search "Running kubeadm without an internet connection"
# For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice:
KUBE_VERSION=v1.10.3
KUBE_PAUSE_VERSION=3.1
ETCD_VERSION=3.1.12
DNS_VERSION=1.14.8

GCR_URL=k8s.gcr.io
ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog

# When testing v1.10.3, I found that Kubernetes depends on both pause-amd64:3.1 and pause:3.1

images=(kube-proxy-amd64:${KUBE_VERSION}
kube-scheduler-amd64:${KUBE_VERSION}
kube-controller-manager-amd64:${KUBE_VERSION}
kube-apiserver-amd64:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
pause-amd64:${KUBE_PAUSE_VERSION}
etcd-amd64:${ETCD_VERSION}
k8s-dns-sidecar-amd64:${DNS_VERSION}
k8s-dns-kube-dns-amd64:${DNS_VERSION}
k8s-dns-dnsmasq-nanny-amd64:${DNS_VERSION})

for imageName in ${images[@]} ; do
    docker pull $ALIYUN_URL/$imageName
    docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName
    docker rmi $ALIYUN_URL/$imageName
done

docker images
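The pull-tag-remove loop above is the generic trick for obtaining k8s.gcr.io images from a mirror; factored out as a standalone helper it looks like the sketch below (mirror_pull is a hypothetical name, the registry URLs are the ones used throughout these scripts):

    GCR_URL=k8s.gcr.io
    ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog

    # Pull an image from the Aliyun mirror, retag it under its gcr.io name, drop the mirror tag
    mirror_pull() {
        local imageName=$1
        docker pull $ALIYUN_URL/$imageName
        docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName
        docker rmi $ALIYUN_URL/$imageName
    }

    mirror_pull pause-amd64:3.1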
--------------------------------------------------------------------------------
/kubeadm_v1.10.3/04_pull_kubernetes_node_images_from_aliyun.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Check the version list in https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
# Search "Running kubeadm without an internet connection"
# For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice:
KUBE_VERSION=v1.10.3
KUBE_PAUSE_VERSION=3.1

GCR_URL=k8s.gcr.io
ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog

# When testing v1.10.3, I found that Kubernetes depends on both pause-amd64:3.1 and pause:3.1

images=(kube-proxy-amd64:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
pause-amd64:${KUBE_PAUSE_VERSION})

for imageName in ${images[@]} ; do
    docker pull $ALIYUN_URL/$imageName
    docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName
    docker rmi $ALIYUN_URL/$imageName
done

docker images

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/05_kubeadm_init.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Reset first if kubeadm init has been run before
kubeadm reset

# kubeadm init with flannel network
kubeadm init --kubernetes-version=v1.10.3 --pod-network-cidr=10.244.0.0/16

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S')
echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile
source $HOME/.bash_profile

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/06_install_flannel.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Pull flannel images from Aliyun
docker pull registry.cn-shenzhen.aliyuncs.com/cookcodeblog/flannel:v0.10.0-amd64
docker tag registry.cn-shenzhen.aliyuncs.com/cookcodeblog/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker rmi registry.cn-shenzhen.aliyuncs.com/cookcodeblog/flannel:v0.10.0-amd64

wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# Wait a while to let the network take effect
sleep 10
kubectl get pods --all-namespaces

# Check component status
kubectl get cs

# Check pods status in case any pods are not in running status
kubectl get pods --all-namespaces
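After applying kube-flannel.yml, flannel runs as a DaemonSet in kube-system and can be checked directly; a small check, assuming the app=flannel label carried by that manifest:

    # One flannel pod per node should be Running
    kubectl -n kube-system get pods -l app=flannel -o wide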
--------------------------------------------------------------------------------
/kubeadm_v1.10.3/CentOS7-Aliyun.repo:
--------------------------------------------------------------------------------
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client. You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the
# remarked out baseurl= line instead.
#
#

[base]
name=CentOS-7 - Base - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/os/$basearch/
        http://mirrors.aliyuncs.com/centos/7/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=os
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#released updates
[updates]
name=CentOS-7 - Updates - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/updates/$basearch/
        http://mirrors.aliyuncs.com/centos/7/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras]
name=CentOS-7 - Extras - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/extras/$basearch/
        http://mirrors.aliyuncs.com/centos/7/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-7 - Plus - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/centosplus/$basearch/
        http://mirrors.aliyuncs.com/centos/7/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#contrib - packages by Centos Users
[contrib]
name=CentOS-7 - Contrib - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/contrib/$basearch/
        http://mirrors.aliyuncs.com/centos/7/contrib/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=contrib
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/README.md:
--------------------------------------------------------------------------------
# One-click deployment of a Kubernetes cluster with kubeadm

## Detailed documentation

For detailed documentation, see my blog post:

* [One-click deployment of a Kubernetes cluster with kubeadm](https://blog.csdn.net/nklinsirui/article/details/80602724)

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/configure_cgroup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/kubeadm_init_master.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Pre-configure
./01_pre_check_and_configure.sh

# Install Docker
./02_install_docker.sh

# Install kubelet kubeadm kubectl
./03_install_kubernetes.sh

# Pull kubernetes images
./04_pull_kubernetes_images_from_aliyun.sh

# Initialize k8s master
./05_kubeadm_init.sh

# Install flannel Pod network
./06_install_flannel.sh

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/kubeadm_join_node.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Pre-configure
./01_pre_check_and_configure.sh

# Install Docker
./02_install_docker.sh

# Install kubelet kubeadm kubectl
./03_install_kubernetes.sh

# Pull kubernetes node images
./04_pull_kubernetes_node_images_from_aliyun.sh

# Join kubernetes node

# Put "kubeadm join" here from "kubeadm init" output
# Example: kubeadm join 192.168.37.101:6443 --token mmxy0q.sjqca7zrzzj7czft --discovery-token-ca-cert-hash sha256:099421bf9b3c58e4e041e816ba6477477474614a17eca7f5d240eb733e7476bb
--------------------------------------------------------------------------------
/kubeadm_v1.10.3/use_aliyun_docker_registry.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/use_aliyun_kubernetes_yum_source.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum clean all
yum makecache -y
yum repolist all

--------------------------------------------------------------------------------
/kubeadm_v1.10.3/use_aliyun_yum_source.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

#wget -O /etc/yum.repos.d/CentOS7-Aliyun.repo http://mirrors.aliyun.com/repo/Centos-7.repo
cp -p ./CentOS7-Aliyun.repo /etc/yum.repos.d
yum clean all
yum makecache -y
yum repolist all

yum install wget -y
echo "###############################################" 28 | echo "Disable SELinux" 29 | setenforce 0 30 | cp -p /etc/selinux/config /etc/selinux/config.bak$(date '+%Y%m%d%H%M%S') 31 | sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 32 | 33 | # Turn off Swap 34 | echo "###############################################" 35 | echo "Turn off Swap" 36 | swapoff -a 37 | cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S') 38 | sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab 39 | sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab 40 | mount -a 41 | free -m 42 | cat /proc/swaps 43 | 44 | # Setup iptables (routing) 45 | echo "###############################################" 46 | echo "Setup iptables (routing)" 47 | cat < /etc/sysctl.d/k8s.conf 48 | net.bridge.bridge-nf-call-ip6tables = 1 49 | net.bridge.bridge-nf-call-iptables = 1 50 | net.bridge.bridge-nf-call-arptables = 1 51 | EOF 52 | 53 | sysctl --system 54 | 55 | # Use Aliyun Yum source 56 | echo "###############################################" 57 | echo "Use Aliyun Yum source" 58 | ./use_aliyun_yum_source.sh 59 | 60 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/02_install_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Uninstall installed docker 6 | sudo yum remove docker \ 7 | docker-client \ 8 | docker-client-latest \ 9 | docker-common \ 10 | docker-latest \ 11 | docker-latest-logrotate \ 12 | docker-logrotate \ 13 | docker-selinux \ 14 | docker-engine-selinux \ 15 | docker-engine 16 | 17 | 18 | # Set up repository 19 | sudo yum install -y yum-utils device-mapper-persistent-data lvm2 20 | 21 | # Use Aliyun Docker 22 | sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 23 | 24 | 25 | # Install docker 26 | # on a new system with yum repo defined, forcing older version and ignoring obsoletes introduced by 17.06.0 27 | yum install -y --setopt=obsoletes=0 \ 28 | docker-ce-17.03.2.ce-1.el7.centos.x86_64 \ 29 | docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch 30 | 31 | systemctl enable docker 32 | systemctl start docker 33 | 34 | docker version 35 | 36 | 37 | # Use Aliyun docker registry 38 | ./use_aliyun_docker_registry.sh 39 | 40 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/03_install_kubernetes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ./use_aliyun_kubernetes_yum_source.sh 6 | 7 | setenforce 0 8 | # Use Kubernetes-cni-0.6.0 explictly 9 | # https://github.com/kubernetes/kubernetes/issues/75701 10 | yum install -y kubelet-1.11.0 kubeadm-1.11.0 kubectl-1.11.0 kubernetes-cni-0.6.0 11 | 12 | # Check installed Kubernetes packages 13 | yum list installed | grep kube 14 | 15 | systemctl enable kubelet && systemctl start kubelet 16 | 17 | # Configure cgroup matched with Docker 18 | ./configure_cgroup.sh 19 | systemctl daemon-reload 20 | systemctl restart kubelet 21 | 22 | 23 | # Don't set pause-amd64 image for kubelet service 24 | # Use Kubernetes default pause image 25 | 26 | 27 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/04_pull_kubernetes_images_from_aliyun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Check version in 
--------------------------------------------------------------------------------
/kubeadm_v1.11.0/03_install_kubernetes.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

./use_aliyun_kubernetes_yum_source.sh

setenforce 0
# Pin kubernetes-cni-0.6.0 explicitly
# https://github.com/kubernetes/kubernetes/issues/75701
yum install -y kubelet-1.11.0 kubeadm-1.11.0 kubectl-1.11.0 kubernetes-cni-0.6.0

# Check installed Kubernetes packages
yum list installed | grep kube

systemctl enable kubelet && systemctl start kubelet

# Configure cgroup matched with Docker
./configure_cgroup.sh
systemctl daemon-reload
systemctl restart kubelet

# Don't set pause-amd64 image for kubelet service
# Use Kubernetes default pause image
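kubelet and Docker must agree on the cgroup driver, which is what configure_cgroup.sh enforces; both sides can be confirmed after the restart:

    # Both should report cgroupfs after configure_cgroup.sh has run
    docker info 2>/dev/null | grep -i "cgroup driver"
    grep cgroup-driver /etc/systemd/system/kubelet.service.d/10-kubeadm.conf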
--------------------------------------------------------------------------------
/kubeadm_v1.11.0/04_pull_kubernetes_images_from_aliyun.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Check the version list in https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
# Search "Running kubeadm without an internet connection"
# For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice:
KUBE_VERSION=v1.11.0
KUBE_PAUSE_VERSION=3.1
ETCD_VERSION=3.2.18
CORE_DNS_VERSION=1.1.3

GCR_URL=k8s.gcr.io
ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog

# When testing v1.11.0, I found that Kubernetes depends on both pause-amd64:3.1 and pause:3.1

images=(kube-proxy-amd64:${KUBE_VERSION}
kube-scheduler-amd64:${KUBE_VERSION}
kube-controller-manager-amd64:${KUBE_VERSION}
kube-apiserver-amd64:${KUBE_VERSION}
pause-amd64:${KUBE_PAUSE_VERSION}
pause:${KUBE_PAUSE_VERSION}
etcd-amd64:${ETCD_VERSION}
coredns:${CORE_DNS_VERSION})

for imageName in ${images[@]} ; do
    docker pull $ALIYUN_URL/$imageName
    docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName
    docker rmi $ALIYUN_URL/$imageName
done

docker images

--------------------------------------------------------------------------------
/kubeadm_v1.11.0/04_pull_kubernetes_node_images_from_aliyun.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Check the version list in https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
# Search "Running kubeadm without an internet connection"
# For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice:
KUBE_VERSION=v1.11.0
KUBE_PAUSE_VERSION=3.1

GCR_URL=k8s.gcr.io
ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog

# When testing v1.11.0, I found that Kubernetes depends on both pause-amd64:3.1 and pause:3.1

images=(kube-proxy-amd64:${KUBE_VERSION}
pause-amd64:${KUBE_PAUSE_VERSION}
pause:${KUBE_PAUSE_VERSION})

for imageName in ${images[@]} ; do
    docker pull $ALIYUN_URL/$imageName
    docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName
    docker rmi $ALIYUN_URL/$imageName
done

docker images

--------------------------------------------------------------------------------
/kubeadm_v1.11.0/05_kubeadm_init.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Reset first if kubeadm init has been run before
kubeadm reset -f

# kubeadm init with flannel network
kubeadm init --kubernetes-version=v1.11.0 --pod-network-cidr=10.244.0.0/16

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S')
echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile
source $HOME/.bash_profile

--------------------------------------------------------------------------------
/kubeadm_v1.11.0/06_install_flannel.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Pull flannel images from Aliyun
docker pull registry.cn-shenzhen.aliyuncs.com/cookcodeblog/flannel:v0.10.0-amd64
docker tag registry.cn-shenzhen.aliyuncs.com/cookcodeblog/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker rmi registry.cn-shenzhen.aliyuncs.com/cookcodeblog/flannel:v0.10.0-amd64

wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# Wait a while to let the network take effect
sleep 10
kubectl get pods --all-namespaces

# Check component status
kubectl get cs

# Check pods status in case any pods are not in running status
kubectl get pods --all-namespaces | grep -v Running

--------------------------------------------------------------------------------
/kubeadm_v1.11.0/CentOS7-Aliyun.repo:
--------------------------------------------------------------------------------
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client. You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the
# remarked out baseurl= line instead.
#
#

[base]
name=CentOS-7 - Base - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/os/$basearch/
        http://mirrors.aliyuncs.com/centos/7/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=os
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#released updates
[updates]
name=CentOS-7 - Updates - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/updates/$basearch/
        http://mirrors.aliyuncs.com/centos/7/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras]
name=CentOS-7 - Extras - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/extras/$basearch/
        http://mirrors.aliyuncs.com/centos/7/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-7 - Plus - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/centosplus/$basearch/
        http://mirrors.aliyuncs.com/centos/7/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

#contrib - packages by Centos Users
[contrib]
name=CentOS-7 - Contrib - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/7/contrib/$basearch/
        http://mirrors.aliyuncs.com/centos/7/contrib/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=contrib
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
-------------------------------------------------------------------------------- /kubeadm_v1.11.0/CentOS7-Aliyun.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Base.repo 2 | # 3 | # The mirror system uses the connecting IP address of the client and the 4 | # update status of each mirror to pick mirrors that are updated to and 5 | # geographically close to the client. You should use this for CentOS updates 6 | # unless you are manually picking other mirrors. 7 | # 8 | # If the mirrorlist= does not work for you, as a fall back you can try the 9 | # remarked out baseurl= line instead. 10 | # 11 | # 12 | 13 | [base] 14 | name=CentOS-7 - Base - mirrors.aliyun.com 15 | failovermethod=priority 16 | baseurl=http://mirrors.aliyun.com/centos/7/os/$basearch/ 17 | http://mirrors.aliyuncs.com/centos/7/os/$basearch/ 18 | #mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=os 19 | gpgcheck=1 20 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 21 | 22 | #released updates 23 | [updates] 24 | name=CentOS-7 - Updates - mirrors.aliyun.com 25 | failovermethod=priority 26 | baseurl=http://mirrors.aliyun.com/centos/7/updates/$basearch/ 27 | http://mirrors.aliyuncs.com/centos/7/updates/$basearch/ 28 | #mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=updates 29 | gpgcheck=1 30 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 31 | 32 | #additional packages that may be useful 33 | [extras] 34 | name=CentOS-7 - Extras - mirrors.aliyun.com 35 | failovermethod=priority 36 | baseurl=http://mirrors.aliyun.com/centos/7/extras/$basearch/ 37 | http://mirrors.aliyuncs.com/centos/7/extras/$basearch/ 38 | #mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=extras 39 | gpgcheck=1 40 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 41 | 42 | #additional packages that extend functionality of existing packages 43 | [centosplus] 44 | name=CentOS-7 - Plus - mirrors.aliyun.com 45 | failovermethod=priority 46 | baseurl=http://mirrors.aliyun.com/centos/7/centosplus/$basearch/ 47 | http://mirrors.aliyuncs.com/centos/7/centosplus/$basearch/ 48 | #mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=centosplus 49 | gpgcheck=1 50 | enabled=0 51 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 52 | 53 | #contrib - packages by Centos Users 54 | [contrib] 55 | name=CentOS-7 - Contrib - mirrors.aliyun.com 56 | failovermethod=priority 57 | baseurl=http://mirrors.aliyun.com/centos/7/contrib/$basearch/ 58 | http://mirrors.aliyuncs.com/centos/7/contrib/$basearch/ 59 | #mirrorlist=http://mirrorlist.centos.org/?release=7&arch=$basearch&repo=contrib 60 | gpgcheck=1 61 | enabled=0 62 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 63 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Deploy a Kubernetes cluster with one command using kubeadm 4 | 5 | 6 | 7 | Kubernetes v1.11.0 has been released; these one-command deployment scripts revise the earlier Kubernetes v1.10.3 scripts for the new version. 8 | 9 | 10 | 11 | ## Detailed documentation 12 | 13 | 14 | 15 | For detailed documentation, see my blog post: 16 | 17 | * [Deploy a Kubernetes cluster with one command using kubeadm](https://blog.csdn.net/nklinsirui/article/details/80602724) 18 | 19 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/configure_cgroup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | # Make the kubelet cgroup driver (cgroupfs) match Docker's 5 | sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 6 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/kubeadm_init_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Pre-configure 6 | ./01_pre_check_and_configure.sh 7 | 8 | # Install Docker 9 | ./02_install_docker.sh 10 | 11 | # Install kubelet kubeadm kubectl 12 | ./03_install_kubernetes.sh 13 | 14 | # Pull kubernetes images 15 | ./04_pull_kubernetes_images_from_aliyun.sh 16 | 17 | # Initialize k8s master 18 | ./05_kubeadm_init.sh 19 | 20 | # Install flannel Pod network 21 | ./06_install_flannel.sh 22 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/kubeadm_join_node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ## Pre-configure 6 | ./01_pre_check_and_configure.sh 7 | 8 | # Install Docker 9 | ./02_install_docker.sh 10 | 11 | # Install kubelet kubeadm kubectl 12 | ./03_install_kubernetes.sh 13 | 14 | # Pull kubernetes node images 15 | ./04_pull_kubernetes_node_images_from_aliyun.sh 16 | 17 | 18 | # Join kubernetes node 19 | 20 | # Paste the "kubeadm join" command here from the "kubeadm init" output 21 | # Example: kubeadm join 192.168.37.101:6443 --token mmxy0q.sjqca7zrzzj7czft --discovery-token-ca-cert-hash sha256:099421bf9b3c58e4e041e816ba6477477474614a17eca7f5d240eb733e7476bb 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/use_aliyun_docker_registry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | sudo mkdir -p /etc/docker 6 | sudo tee /etc/docker/daemon.json <<-'EOF' 7 | { 8 | "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"] 9 | } 10 | EOF 11 | sudo systemctl daemon-reload 12 | sudo systemctl restart docker -------------------------------------------------------------------------------- /kubeadm_v1.11.0/use_aliyun_kubernetes_yum_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo 6 | [kubernetes] 7 | name=Kubernetes 8 | baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ 9 | enabled=1 10 | gpgcheck=1 11 | repo_gpgcheck=1 12 | gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg 13 | EOF 14 | 15 | yum clean all 16 | yum makecache -y 17 | yum repolist all 18 | -------------------------------------------------------------------------------- /kubeadm_v1.11.0/use_aliyun_yum_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | #wget -O /etc/yum.repos.d/CentOS7-Aliyun.repo 
http://mirrors.aliyun.com/repo/Centos-7.repo 6 | cp -p ./CentOS7-Aliyun.repo /etc/yum.repos.d 7 | yum clean all 8 | yum makecache -y 9 | yum repolist all 10 | 11 | yum install wget -y 12 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/01_pre_check_and_configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "###############################################" 6 | echo "Please ensure your OS is 64-bit CentOS 7" 7 | echo "Please ensure your machine has full network connection and internet access" 8 | echo "Please ensure you run this script as the root user" 9 | 10 | # Check hostname, MAC addr and product_uuid 11 | echo "###############################################" 12 | echo "Please check hostname as below:" 13 | uname -a 14 | 15 | echo "###############################################" 16 | echo "Please check MAC addr and product_uuid as below:" 17 | ip link 18 | sudo cat /sys/class/dmi/id/product_uuid 19 | 20 | # Stop firewalld 21 | echo "###############################################" 22 | echo "Stop firewalld" 23 | systemctl stop firewalld 24 | systemctl disable firewalld 25 | 26 | # Disable SELinux 27 | echo "###############################################" 28 | echo "Disable SELinux" 29 | setenforce 0 30 | cp -p /etc/selinux/config /etc/selinux/config.bak$(date '+%Y%m%d%H%M%S') 31 | sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 32 | 33 | # Turn off Swap 34 | echo "###############################################" 35 | echo "Turn off Swap" 36 | swapoff -a 37 | cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S') 38 | sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab 39 | sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab 40 | mount -a 41 | free -m 42 | cat /proc/swaps 43 | 44 | # Setup iptables (routing) 45 | echo "###############################################" 46 | echo "Setup iptables (routing)" 47 | cat <<EOF > /etc/sysctl.d/k8s.conf 48 | net.bridge.bridge-nf-call-ip6tables = 1 49 | net.bridge.bridge-nf-call-iptables = 1 50 | net.bridge.bridge-nf-call-arptables = 1 51 | EOF 52 | 53 | sysctl --system 54 | 55 | # Use Aliyun Yum source 56 | echo "###############################################" 57 | echo "Use Aliyun Yum source" 58 | ./use_aliyun_yum_source.sh 59 | 60 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/02_install_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Uninstall installed docker 6 | sudo yum remove -y docker \ 7 | docker-client \ 8 | docker-client-latest \ 9 | docker-common \ 10 | docker-latest \ 11 | docker-latest-logrotate \ 12 | docker-logrotate \ 13 | docker-selinux \ 14 | docker-engine-selinux \ 15 | docker-engine 16 | 17 | 18 | # Set up repository 19 | sudo yum install -y yum-utils device-mapper-persistent-data lvm2 20 | 21 | # Use Aliyun Docker 22 | sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 23 | 24 | 25 | # Install a validated docker version 26 | # https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#external-dependencies 27 | yum install docker-ce-18.06.0.ce -y 28 | 29 | systemctl enable docker 30 | systemctl start docker 31 | 32 | docker version 33 | 34 | 35 | # Use Aliyun docker registry 36 | ./use_aliyun_docker_registry.sh 37 | 38 | 
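At this point Docker is installed and running. A small sanity check worth doing before 03_install_kubernetes.sh runs configure_cgroup.sh (an editorial sketch, not one of the original scripts): confirm which cgroup driver Docker actually uses, since the kubelet's cgroup-driver setting must match it.

```bash
# Prints e.g. "Cgroup Driver: cgroupfs"; configure_cgroup.sh assumes this value
docker info 2>/dev/null | grep -i "cgroup driver"
```
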
-------------------------------------------------------------------------------- /kubeadm_v1.13.0/03_install_kubernetes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ./use_aliyun_kubernetes_yum_source.sh 6 | 7 | setenforce 0 8 | # Use kubernetes-cni-0.6.0 explicitly 9 | # https://github.com/kubernetes/kubernetes/issues/75701 10 | yum install -y kubelet-1.13.0 kubeadm-1.13.0 kubectl-1.13.0 kubernetes-cni-0.6.0 11 | 12 | # Check installed Kubernetes packages 13 | yum list installed | grep kube 14 | 15 | systemctl enable kubelet && systemctl start kubelet 16 | 17 | # Configure the kubelet cgroup driver to match Docker's 18 | ./configure_cgroup.sh 19 | systemctl daemon-reload 20 | systemctl restart kubelet 21 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/04_pull_kubernetes_images_from_aliyun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Check version in https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/ 6 | # Search "Running kubeadm without an internet connection" 7 | # For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice: 8 | KUBE_VERSION=v1.13.0 9 | KUBE_PAUSE_VERSION=3.1 10 | ETCD_VERSION=3.2.24 11 | CORE_DNS_VERSION=1.2.6 12 | 13 | GCR_URL=k8s.gcr.io 14 | ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog 15 | 16 | # In Kubernetes 1.12 and later, the k8s.gcr.io/kube-*, k8s.gcr.io/etcd and k8s.gcr.io/pause images don’t require an -${ARCH} suffix 17 | images=(kube-proxy:${KUBE_VERSION} 18 | kube-scheduler:${KUBE_VERSION} 19 | kube-controller-manager:${KUBE_VERSION} 20 | kube-apiserver:${KUBE_VERSION} 21 | pause:${KUBE_PAUSE_VERSION} 22 | etcd:${ETCD_VERSION} 23 | coredns:${CORE_DNS_VERSION}) 24 | 25 | # Pull each image from the Aliyun mirror, retag it as k8s.gcr.io, then remove the mirror tag 26 | for imageName in "${images[@]}" ; do 27 | docker pull $ALIYUN_URL/$imageName 28 | docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName 29 | docker rmi $ALIYUN_URL/$imageName 30 | done 31 | 32 | docker images 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/04_pull_kubernetes_node_images_from_aliyun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Check version in https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/ 6 | # Search "Running kubeadm without an internet connection" 7 | # For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice: 8 | KUBE_VERSION=v1.13.0 9 | KUBE_PAUSE_VERSION=3.1 10 | 11 | GCR_URL=k8s.gcr.io 12 | ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog 13 | 14 | 15 | # In Kubernetes 1.12 and later, the k8s.gcr.io/kube-*, k8s.gcr.io/etcd and k8s.gcr.io/pause images don’t require an -${ARCH} suffix 16 | images=(kube-proxy:${KUBE_VERSION} 17 | pause:${KUBE_PAUSE_VERSION}) 18 | 19 | # Pull each image from the Aliyun mirror, retag it as k8s.gcr.io, then remove the mirror tag 20 | for imageName in "${images[@]}" ; do 21 | docker pull $ALIYUN_URL/$imageName 22 | docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName 23 | docker rmi $ALIYUN_URL/$imageName 24 | done 25 | 26 | docker images 27 | 28 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/05_kubeadm_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Reset first if 
kubeadm init was run before 6 | kubeadm reset -f 7 | 8 | # kubeadm init with flannel network 9 | kubeadm init --kubernetes-version=v1.13.0 --pod-network-cidr=10.244.0.0/16 10 | 11 | 12 | mkdir -p $HOME/.kube 13 | sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config 14 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 15 | cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S') 16 | echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile 17 | source $HOME/.bash_profile 18 | 19 | 20 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/06_install_flannel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ./pull_flannel_images_from_aliyun.sh 6 | 7 | # https://v1-13.docs.kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network 8 | wget -O kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml 9 | 10 | kubectl apply -f kube-flannel.yml 11 | 12 | # Wait a while to let the network take effect 13 | sleep 30 14 | ./k8s_health_check.sh 15 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/CentOS7-Aliyun.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Base.repo 2 | # 3 | # The mirror system uses the connecting IP address of the client and the 4 | # update status of each mirror to pick mirrors that are updated to and 5 | # geographically close to the client. You should use this for CentOS updates 6 | # unless you are manually picking other mirrors. 7 | # 8 | # If the mirrorlist= does not work for you, as a fall back you can try the 9 | # remarked out baseurl= line instead. 
10 | # 11 | # 12 | 13 | [base] 14 | name=CentOS-7 - Base - mirrors.aliyun.com 15 | failovermethod=priority 16 | baseurl=http://mirrors.aliyun.com/centos/7/os/$basearch/ 17 | gpgcheck=1 18 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 19 | 20 | #released updates 21 | [updates] 22 | name=CentOS-7 - Updates - mirrors.aliyun.com 23 | failovermethod=priority 24 | baseurl=http://mirrors.aliyun.com/centos/7/updates/$basearch/ 25 | gpgcheck=1 26 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 27 | 28 | #additional packages that may be useful 29 | [extras] 30 | name=CentOS-7 - Extras - mirrors.aliyun.com 31 | failovermethod=priority 32 | baseurl=http://mirrors.aliyun.com/centos/7/extras/$basearch/ 33 | gpgcheck=1 34 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 35 | 36 | #additional packages that extend functionality of existing packages 37 | [centosplus] 38 | name=CentOS-7 - Plus - mirrors.aliyun.com 39 | failovermethod=priority 40 | baseurl=http://mirrors.aliyun.com/centos/7/centosplus/$basearch/ 41 | gpgcheck=1 42 | enabled=0 43 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 44 | 45 | #contrib - packages by Centos Users 46 | [contrib] 47 | name=CentOS-7 - Contrib - mirrors.aliyun.com 48 | failovermethod=priority 49 | baseurl=http://mirrors.aliyun.com/centos/7/contrib/$basearch/ 50 | gpgcheck=1 51 | enabled=0 52 | gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7 53 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/README.md: -------------------------------------------------------------------------------- 1 | # Install Kubernetes v1.13.0 2 | 3 | https://v1-13.docs.kubernetes.io/docs/setup/independent/create-cluster-kubeadm/ 4 | 5 | Requires 2 CPUs 6 | 7 | * Use Aliyun Yum repo 8 | * Use Aliyun Docker repo 9 | * Use Aliyun Kubernetes repo 10 | * Docker 18.06 11 | * kubeadm 1.13 12 | * kubelet 1.13 13 | * kubectl 1.13 14 | * flannel v0.10.0 15 | 16 | ## Kubernetes Images 17 | 18 | Since Kubernetes 1.11 19 | 20 | ```bash 21 | kubeadm config images list 22 | kubeadm config images pull 23 | ``` 24 | 25 | * k8s.gcr.io/kube-apiserver:v1.13.0 26 | * k8s.gcr.io/kube-controller-manager:v1.13.0 27 | * k8s.gcr.io/kube-scheduler:v1.13.0 28 | * k8s.gcr.io/kube-proxy:v1.13.0 29 | * k8s.gcr.io/pause:3.1 30 | * k8s.gcr.io/etcd:3.2.24 31 | * k8s.gcr.io/coredns:1.2.6 32 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/configure_cgroup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 6 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/epel-7-Aliyun.repo: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 7 - $basearch 3 | baseurl=http://mirrors.aliyun.com/epel/7/$basearch 4 | failovermethod=priority 5 | enabled=1 6 | gpgcheck=0 7 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 8 | 9 | [epel-debuginfo] 10 | name=Extra Packages for Enterprise Linux 7 - $basearch - Debug 11 | baseurl=http://mirrors.aliyun.com/epel/7/$basearch/debug 12 | failovermethod=priority 13 | enabled=0 14 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 15 | gpgcheck=0 16 | 17 | [epel-source] 18 | name=Extra Packages for Enterprise Linux 7 - 
$basearch - Source 19 | baseurl=http://mirrors.aliyun.com/epel/7/SRPMS 20 | failovermethod=priority 21 | enabled=0 22 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 23 | gpgcheck=0 24 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/k8s_health_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Kubernetes Cheatsheet 6 | # https://v1-13.docs.kubernetes.io/docs/reference/kubectl/cheatsheet/ 7 | # kubectl --help 8 | 9 | # Display cluster info 10 | kubectl cluster-info 11 | 12 | # Display pods 13 | kubectl get pods --all-namespaces -o wide 14 | 15 | # Check component status 16 | kubectl get cs 17 | 18 | # Check pod status in case any pods are not in Running status 19 | kubectl get pods --all-namespaces | grep -v Running 20 | 21 | # Nodes 22 | kubectl get nodes 23 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/kubeadm_init_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Pre-configure 6 | ./01_pre_check_and_configure.sh 7 | 8 | # Install Docker 9 | ./02_install_docker.sh 10 | 11 | # Install kubelet kubeadm kubectl 12 | ./03_install_kubernetes.sh 13 | 14 | # Pull kubernetes images 15 | ./04_pull_kubernetes_images_from_aliyun.sh 16 | 17 | # Initialize k8s master 18 | ./05_kubeadm_init.sh 19 | 20 | # Install flannel Pod network 21 | ./06_install_flannel.sh 22 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/kubeadm_join_node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ## Pre-configure 6 | ./01_pre_check_and_configure.sh 7 | 8 | # Install Docker 9 | ./02_install_docker.sh 10 | 11 | # Install kubelet kubeadm kubectl 12 | ./03_install_kubernetes.sh 13 | 14 | # Pull kubernetes node images 15 | ./04_pull_kubernetes_node_images_from_aliyun.sh 16 | 17 | # Pull flannel images 18 | ./pull_flannel_images_from_aliyun.sh 19 | 20 | 21 | 22 | # Join kubernetes node 23 | 24 | # Paste the "kubeadm join" command here from the "kubeadm init" output 25 | # Example: kubeadm join 192.168.37.101:6443 --token mmxy0q.sjqca7zrzzj7czft --discovery-token-ca-cert-hash sha256:099421bf9b3c58e4e041e816ba6477477474614a17eca7f5d240eb733e7476bb 26 | 27 | # Run `kubeadm token create --print-join-command` on the Kubernetes master to get the `kubeadm join` command 28 | 29 | 30 | # Resolve the need to specify the API server and the x509 error when running kubectl on a node 31 | # https://github.com/kubernetes/kubernetes/issues/48378 32 | mkdir -p $HOME/.kube 33 | sudo cp /etc/kubernetes/kubelet.conf $HOME/.kube/config 34 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 35 | cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S') 36 | echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile 37 | source $HOME/.bash_profile 38 | 39 | 40 | ./k8s_health_check.sh 41 | 42 | 
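To obtain the join command referenced in the comments above, a short usage sketch (editorial, not one of the original scripts; the token and hash below are placeholders):

```bash
# On the master: print a ready-to-paste join command (fresh token + CA cert hash)
kubeadm token create --print-join-command

# On the worker: run the printed command, which looks like
# kubeadm join <master-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
```
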
-------------------------------------------------------------------------------- /kubeadm_v1.13.0/pull_flannel_images_from_aliyun.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Check images in https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml 6 | 7 | FLANNEL_VERSION=v0.11.0 8 | 9 | 10 | COREOS_URL=quay.io/coreos 11 | ALIYUN_URL=registry.cn-shenzhen.aliyuncs.com/cookcodeblog 12 | 13 | images=(flannel:${FLANNEL_VERSION}-amd64 14 | flannel:${FLANNEL_VERSION}-arm64 15 | flannel:${FLANNEL_VERSION}-arm 16 | flannel:${FLANNEL_VERSION}-ppc64le 17 | flannel:${FLANNEL_VERSION}-s390x) 18 | 19 | # Pull each image from the Aliyun mirror, retag it as quay.io/coreos, then remove the mirror tag 20 | for imageName in "${images[@]}" ; do 21 | docker pull $ALIYUN_URL/$imageName 22 | docker tag $ALIYUN_URL/$imageName $COREOS_URL/$imageName 23 | docker rmi $ALIYUN_URL/$imageName 24 | done 25 | 26 | docker images 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/use_aliyun_docker_registry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | sudo mkdir -p /etc/docker 6 | sudo tee /etc/docker/daemon.json <<-'EOF' 7 | { 8 | "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"] 9 | } 10 | EOF 11 | sudo systemctl daemon-reload 12 | sudo systemctl restart docker -------------------------------------------------------------------------------- /kubeadm_v1.13.0/use_aliyun_kubernetes_yum_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo 6 | [kubernetes] 7 | name=Kubernetes 8 | baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ 9 | enabled=1 10 | gpgcheck=1 11 | repo_gpgcheck=1 12 | gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg 13 | EOF 14 | 15 | yum clean all 16 | yum makecache -y 17 | yum repolist all 18 | -------------------------------------------------------------------------------- /kubeadm_v1.13.0/use_aliyun_yum_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | #wget -O /etc/yum.repos.d/CentOS7-Aliyun.repo http://mirrors.aliyun.com/repo/Centos-7.repo 6 | cp -p ./CentOS7-Aliyun.repo /etc/yum.repos.d 7 | cp -p ./epel-7-Aliyun.repo /etc/yum.repos.d 8 | 9 | yum clean all 10 | yum makecache -y 11 | yum repolist all 12 | 13 | yum install wget -y 14 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/01_pre_check_and_configure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "###############################################" 4 | echo "Please ensure your OS is 64-bit CentOS 7" 5 | echo "Please ensure your machine has full network connection and internet access" 6 | echo "Please ensure you run this script as the root user" 7 | 8 | # Check hostname, MAC addr and product_uuid 9 | echo "###############################################" 10 | echo "Please check hostname as below:" 11 | uname -a 12 | 13 | echo "###############################################" 14 | echo "Please check MAC addr and product_uuid as below:" 15 | ip link 16 | sudo cat /sys/class/dmi/id/product_uuid 17 | 18 | echo "###############################################" 19 | echo "Please check default route:" 20 | ip route show 21 | 22 | # Stop firewalld 23 | echo "###############################################" 24 | echo "Stop firewalld" 25 | sudo systemctl stop firewalld 26 | sudo systemctl disable firewalld 27 | 28 | # Disable SELinux 29 | echo "###############################################" 30 | echo "Disable SELinux" 31 | sudo getenforce 32 | 33 | sudo setenforce 0 34 | sudo cp -p /etc/selinux/config 
/etc/selinux/config.bak$(date '+%Y%m%d%H%M%S') 35 | sudo sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 36 | 37 | sudo getenforce 38 | 39 | # Turn off Swap 40 | echo "###############################################" 41 | echo "Turn off Swap" 42 | free -m 43 | sudo cat /proc/swaps 44 | 45 | sudo swapoff -a 46 | 47 | sudo cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S') 48 | sudo sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab 49 | sudo sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab 50 | sudo mount -a 51 | 52 | free -m 53 | sudo cat /proc/swaps 54 | 55 | # Setup iptables (routing) 56 | echo "###############################################" 57 | echo "Setup iptables (routing)" 58 | sudo cat <<EOF > /etc/sysctl.d/k8s.conf 59 | net.bridge.bridge-nf-call-ip6tables = 1 60 | net.bridge.bridge-nf-call-iptables = 1 61 | net.bridge.bridge-nf-call-arptables = 1 62 | net.ipv4.ip_forward = 1 63 | EOF 64 | 65 | sudo sysctl --system 66 | 67 | 68 | # Check ports 69 | echo "###############################################" 70 | echo "Check API server port(s)" 71 | netstat -nlp | grep "8080\|6443" 72 | 73 | echo "Check ETCD port(s)" 74 | netstat -nlp | grep "2379\|2380" 75 | 76 | echo "Check port(s): kubelet, kube-scheduler, kube-controller-manager" 77 | netstat -nlp | grep "10250\|10251\|10252" 78 | 79 | 80 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/02_install_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Uninstall installed docker 6 | sudo yum remove -y docker \ 7 | docker-client \ 8 | docker-client-latest \ 9 | docker-common \ 10 | docker-latest \ 11 | docker-latest-logrotate \ 12 | docker-logrotate \ 13 | docker-selinux \ 14 | docker-engine-selinux \ 15 | docker-engine 16 | 17 | 18 | # Set up repository 19 | sudo yum install -y yum-utils device-mapper-persistent-data lvm2 20 | 21 | # Use Aliyun Docker 22 | sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 23 | 24 | # Install a validated docker version 25 | sudo yum install -y docker-ce-19.03.11 docker-ce-cli-19.03.11 containerd.io-1.2.13 26 | 27 | # Setup Docker daemon 28 | mkdir -p /etc/docker 29 | 30 | sudo cat <<EOF > /etc/docker/daemon.json -------------------------------------------------------------------------------- /kubeadm_v1.19.3/aliyun/use_aliyun_kubernetes_yum_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo 6 | [kubernetes] 7 | name=Kubernetes 8 | baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ 9 | enabled=1 10 | gpgcheck=1 11 | repo_gpgcheck=1 12 | gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg 13 | EOF 14 | 15 | yum clean all 16 | yum makecache -y 17 | yum repolist all 18 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/aliyun/use_aliyun_yum_source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | #wget -O /etc/yum.repos.d/CentOS7-Aliyun.repo http://mirrors.aliyun.com/repo/Centos-7.repo 6 | sudo cp -p ./repo/CentOS7-Aliyun.repo /etc/yum.repos.d 7 | sudo cp -p ./repo/epel-7-Aliyun.repo /etc/yum.repos.d 8 | 9 | sudo yum clean all 10 | sudo yum makecache -y 11 | sudo yum repolist all 12 | 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/enable_kubectl_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 
| 4 | 5 | 6 | # Make kubectl work 7 | 8 | mkdir -p $HOME/.kube 9 | sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config 10 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S') 13 | echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile 14 | source $HOME/.bash_profile 15 | 16 | 17 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/enable_kubectl_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | 5 | 6 | # Make kubectl work 7 | 8 | mkdir -p $HOME/.kube 9 | sudo cp /etc/kubernetes/kubelet.conf $HOME/.kube/config 10 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 11 | cp -p $HOME/.bash_profile $HOME/.bash_profile.bak$(date '+%Y%m%d%H%M%S') 12 | echo "export KUBECONFIG=$HOME/.kube/config" >> $HOME/.bash_profile 13 | source $HOME/.bash_profile 14 | 15 | 16 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/haproxy/haproxy.template.cfg: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # apiserver frontend which proxies to the masters 3 | #--------------------------------------------------------------------- 4 | frontend apiserver 5 | bind *:6443 6 | mode tcp 7 | option tcplog 8 | default_backend apiserver 9 | 10 | #--------------------------------------------------------------------- 11 | # round robin balancing for apiserver 12 | #--------------------------------------------------------------------- 13 | backend apiserver 14 | option httpchk GET /healthz 15 | http-check expect status 200 16 | mode tcp 17 | option ssl-hello-chk 18 | balance roundrobin 19 | server k8s-master-01 192.168.0.152:6443 check 20 | server k8s-master-02 192.168.0.153:6443 check 21 | server k8s-master-03 192.168.0.162:6443 check 22 | 23 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/k8s_health_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Kubernetes Cheatsheet 6 | # https://v1-13.docs.kubernetes.io/docs/reference/kubectl/cheatsheet/ 7 | # kubectl --help 8 | 9 | # Display cluster info 10 | kubectl cluster-info 11 | 12 | # Nodes 13 | kubectl get nodes 14 | 15 | # Display pods 16 | kubectl get pods --all-namespaces -o wide 17 | 18 | # Check pod status in case any pods are not in Running status 19 | kubectl get pods --all-namespaces | grep -v Running 20 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/keepalived/check_apiserver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | 4 | APISERVER_VIP=$1 5 | APISERVER_DEST_PORT=$2 6 | 7 | errorExit() { 8 | echo "*** $*" 1>&2 9 | exit 1 10 | } 11 | 12 | curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/" 13 | if ip addr | grep -q ${APISERVER_VIP}; then 14 | curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/" 15 | fi -------------------------------------------------------------------------------- 
/kubeadm_v1.19.3/keepalived/keepalived.bakcup.template.cfg: -------------------------------------------------------------------------------- 1 | ! /etc/keepalived/keepalived.conf 2 | ! Configuration File for keepalived 3 | global_defs { 4 | router_id LVS_DEVEL 5 | } 6 | vrrp_script check_apiserver { 7 | script "/etc/keepalived/check_apiserver.sh 192.168.0.170 6443" 8 | interval 3 9 | weight -2 10 | fall 10 11 | rise 2 12 | } 13 | 14 | vrrp_instance VI_1 { 15 | state BACKUP 16 | interface ens33 17 | virtual_router_id 51 18 | priority 100 19 | authentication { 20 | auth_type PASS 21 | auth_pass Keep@lived 22 | } 23 | virtual_ipaddress { 24 | 192.168.0.170 25 | } 26 | track_script { 27 | check_apiserver 28 | } 29 | } -------------------------------------------------------------------------------- /kubeadm_v1.19.3/keepalived/keepalived.master.template.cfg: -------------------------------------------------------------------------------- 1 | ! /etc/keepalived/keepalived.conf 2 | ! Configuration File for keepalived 3 | global_defs { 4 | router_id LVS_DEVEL 5 | } 6 | vrrp_script check_apiserver { 7 | script "/etc/keepalived/check_apiserver.sh 192.168.0.170 6443" 8 | interval 3 9 | weight -2 10 | fall 10 11 | rise 2 12 | } 13 | 14 | vrrp_instance VI_1 { 15 | state MASTER 16 | interface ens33 17 | virtual_router_id 51 18 | priority 101 19 | authentication { 20 | auth_type PASS 21 | auth_pass Keep@lived 22 | } 23 | virtual_ipaddress { 24 | 192.168.0.170 25 | } 26 | track_script { 27 | check_apiserver 28 | } 29 | } -------------------------------------------------------------------------------- /kubeadm_v1.19.3/kubeadm_init_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Pre-configure 6 | ./01_pre_check_and_configure.sh 7 | 8 | # Install Docker 9 | ./02_install_docker.sh 10 | 11 | # Install kubelet kubeadm kubectl 12 | ./03_install_kubernetes.sh 13 | 14 | # Pull kubernetes images 15 | ./04_pull_kubernetes_images_from_aliyun.sh 16 | 17 | # Initialize k8s master 18 | ./05_kubeadm_init.sh 19 | 20 | # Install calico Pod network 21 | ./06_install_calico.sh 22 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/kubeadm_join_node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ## Pre-configure 6 | ./01_pre_check_and_configure.sh 7 | 8 | # Install Docker 9 | ./02_install_docker.sh 10 | 11 | # Install kubelet kubeadm kubectl 12 | ./03_install_kubernetes.sh 13 | 14 | # Pull kubernetes node images 15 | ./04_pull_kubernetes_node_images_from_aliyun.sh 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/metrics-server/metrics-server-insecure-hostnetwork.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:aggregated-metrics-reader 6 | labels: 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 9 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 10 | rules: 11 | - apiGroups: ["metrics.k8s.io"] 12 | resources: ["pods", "nodes"] 13 | verbs: ["get", "list", "watch"] 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: metrics-server:system:auth-delegator 19 | roleRef: 20 | apiGroup: 
rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: system:auth-delegator 23 | subjects: 24 | - kind: ServiceAccount 25 | name: metrics-server 26 | namespace: kube-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1 29 | kind: RoleBinding 30 | metadata: 31 | name: metrics-server-auth-reader 32 | namespace: kube-system 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: Role 36 | name: extension-apiserver-authentication-reader 37 | subjects: 38 | - kind: ServiceAccount 39 | name: metrics-server 40 | namespace: kube-system 41 | --- 42 | apiVersion: apiregistration.k8s.io/v1beta1 43 | kind: APIService 44 | metadata: 45 | name: v1beta1.metrics.k8s.io 46 | spec: 47 | service: 48 | name: metrics-server 49 | namespace: kube-system 50 | group: metrics.k8s.io 51 | version: v1beta1 52 | insecureSkipTLSVerify: true 53 | groupPriorityMinimum: 100 54 | versionPriority: 100 55 | --- 56 | apiVersion: v1 57 | kind: ServiceAccount 58 | metadata: 59 | name: metrics-server 60 | namespace: kube-system 61 | --- 62 | apiVersion: apps/v1 63 | kind: Deployment 64 | metadata: 65 | name: metrics-server 66 | namespace: kube-system 67 | labels: 68 | k8s-app: metrics-server 69 | spec: 70 | selector: 71 | matchLabels: 72 | k8s-app: metrics-server 73 | template: 74 | metadata: 75 | name: metrics-server 76 | labels: 77 | k8s-app: metrics-server 78 | spec: 79 | serviceAccountName: metrics-server 80 | volumes: 81 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 82 | - name: tmp-dir 83 | emptyDir: {} 84 | containers: 85 | - name: metrics-server 86 | image: registry.cn-shenzhen.aliyuncs.com/cookcodeblog/metrics-server:v0.3.7 87 | imagePullPolicy: IfNotPresent 88 | args: 89 | - --cert-dir=/tmp 90 | - --secure-port=4443 91 | - --kubelet-insecure-tls 92 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 93 | ports: 94 | - name: main-port 95 | containerPort: 4443 96 | protocol: TCP 97 | securityContext: 98 | readOnlyRootFilesystem: true 99 | runAsNonRoot: true 100 | runAsUser: 1000 101 | volumeMounts: 102 | - name: tmp-dir 103 | mountPath: /tmp 104 | nodeSelector: 105 | kubernetes.io/os: linux 106 | hostNetwork: true 107 | --- 108 | apiVersion: v1 109 | kind: Service 110 | metadata: 111 | name: metrics-server 112 | namespace: kube-system 113 | labels: 114 | kubernetes.io/name: "Metrics-server" 115 | kubernetes.io/cluster-service: "true" 116 | spec: 117 | selector: 118 | k8s-app: metrics-server 119 | ports: 120 | - port: 443 121 | protocol: TCP 122 | targetPort: main-port 123 | --- 124 | apiVersion: rbac.authorization.k8s.io/v1 125 | kind: ClusterRole 126 | metadata: 127 | name: system:metrics-server 128 | rules: 129 | - apiGroups: 130 | - "" 131 | resources: 132 | - pods 133 | - nodes 134 | - nodes/stats 135 | - namespaces 136 | - configmaps 137 | verbs: 138 | - get 139 | - list 140 | - watch 141 | --- 142 | apiVersion: rbac.authorization.k8s.io/v1 143 | kind: ClusterRoleBinding 144 | metadata: 145 | name: system:metrics-server 146 | roleRef: 147 | apiGroup: rbac.authorization.k8s.io 148 | kind: ClusterRole 149 | name: system:metrics-server 150 | subjects: 151 | - kind: ServiceAccount 152 | name: metrics-server 153 | namespace: kube-system 154 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/metrics-server/metrics-server-insecure.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | 
metadata: 5 | name: system:aggregated-metrics-reader 6 | labels: 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 9 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 10 | rules: 11 | - apiGroups: ["metrics.k8s.io"] 12 | resources: ["pods", "nodes"] 13 | verbs: ["get", "list", "watch"] 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: metrics-server:system:auth-delegator 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: system:auth-delegator 23 | subjects: 24 | - kind: ServiceAccount 25 | name: metrics-server 26 | namespace: kube-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1 29 | kind: RoleBinding 30 | metadata: 31 | name: metrics-server-auth-reader 32 | namespace: kube-system 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: Role 36 | name: extension-apiserver-authentication-reader 37 | subjects: 38 | - kind: ServiceAccount 39 | name: metrics-server 40 | namespace: kube-system 41 | --- 42 | apiVersion: apiregistration.k8s.io/v1beta1 43 | kind: APIService 44 | metadata: 45 | name: v1beta1.metrics.k8s.io 46 | spec: 47 | service: 48 | name: metrics-server 49 | namespace: kube-system 50 | group: metrics.k8s.io 51 | version: v1beta1 52 | insecureSkipTLSVerify: true 53 | groupPriorityMinimum: 100 54 | versionPriority: 100 55 | --- 56 | apiVersion: v1 57 | kind: ServiceAccount 58 | metadata: 59 | name: metrics-server 60 | namespace: kube-system 61 | --- 62 | apiVersion: apps/v1 63 | kind: Deployment 64 | metadata: 65 | name: metrics-server 66 | namespace: kube-system 67 | labels: 68 | k8s-app: metrics-server 69 | spec: 70 | selector: 71 | matchLabels: 72 | k8s-app: metrics-server 73 | template: 74 | metadata: 75 | name: metrics-server 76 | labels: 77 | k8s-app: metrics-server 78 | spec: 79 | serviceAccountName: metrics-server 80 | volumes: 81 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 82 | - name: tmp-dir 83 | emptyDir: {} 84 | containers: 85 | - name: metrics-server 86 | image: registry.cn-shenzhen.aliyuncs.com/cookcodeblog/metrics-server:v0.3.7 87 | imagePullPolicy: IfNotPresent 88 | args: 89 | - --cert-dir=/tmp 90 | - --secure-port=4443 91 | - --kubelet-insecure-tls 92 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 93 | ports: 94 | - name: main-port 95 | containerPort: 4443 96 | protocol: TCP 97 | securityContext: 98 | readOnlyRootFilesystem: true 99 | runAsNonRoot: true 100 | runAsUser: 1000 101 | volumeMounts: 102 | - name: tmp-dir 103 | mountPath: /tmp 104 | nodeSelector: 105 | kubernetes.io/os: linux 106 | --- 107 | apiVersion: v1 108 | kind: Service 109 | metadata: 110 | name: metrics-server 111 | namespace: kube-system 112 | labels: 113 | kubernetes.io/name: "Metrics-server" 114 | kubernetes.io/cluster-service: "true" 115 | spec: 116 | selector: 117 | k8s-app: metrics-server 118 | ports: 119 | - port: 443 120 | protocol: TCP 121 | targetPort: main-port 122 | --- 123 | apiVersion: rbac.authorization.k8s.io/v1 124 | kind: ClusterRole 125 | metadata: 126 | name: system:metrics-server 127 | rules: 128 | - apiGroups: 129 | - "" 130 | resources: 131 | - pods 132 | - nodes 133 | - nodes/stats 134 | - namespaces 135 | - configmaps 136 | verbs: 137 | - get 138 | - list 139 | - watch 140 | --- 141 | apiVersion: rbac.authorization.k8s.io/v1 142 | kind: ClusterRoleBinding 143 | metadata: 144 | name: system:metrics-server 145 | roleRef: 146 
| apiGroup: rbac.authorization.k8s.io 147 | kind: ClusterRole 148 | name: system:metrics-server 149 | subjects: 150 | - kind: ServiceAccount 151 | name: metrics-server 152 | namespace: kube-system 153 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | image: quay.io/prometheus/alertmanager:v0.21.0 10 | nodeSelector: 11 | kubernetes.io/os: linux 12 | replicas: 3 13 | securityContext: 14 | fsGroup: 2000 15 | runAsNonRoot: true 16 | runAsUser: 1000 17 | serviceAccountName: alertmanager-main 18 | version: v0.21.0 19 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {} 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-main 6 | namespace: monitoring 7 | stringData: 8 | alertmanager.yaml: |- 9 | "global": 10 | "resolve_timeout": "5m" 11 | "inhibit_rules": 12 | - "equal": 13 | - "namespace" 14 | - "alertname" 15 | "source_match": 16 | "severity": "critical" 17 | "target_match_re": 18 | "severity": "warning|info" 19 | - "equal": 20 | - "namespace" 21 | - "alertname" 22 | "source_match": 23 | "severity": "warning" 24 | "target_match_re": 25 | "severity": "info" 26 | "receivers": 27 | - "name": "Default" 28 | - "name": "Watchdog" 29 | - "name": "Critical" 30 | "route": 31 | "group_by": 32 | - "namespace" 33 | "group_interval": "5m" 34 | "group_wait": "30s" 35 | "receiver": "Default" 36 | "repeat_interval": "12h" 37 | "routes": 38 | - "match": 39 | "alertname": "Watchdog" 40 | "receiver": "Watchdog" 41 | - "match": 42 | "severity": "critical" 43 | "receiver": "Critical" 44 | type: Opaque 45 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | 
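This ServiceMonitor tells the Prometheus Operator to scrape Alertmanager's web port every 30s. A quick verification sketch (an editorial addition, not part of the kube-prometheus manifests; it assumes the defaults used by these manifests, e.g. the prometheus-k8s service in the monitoring namespace):

```bash
# Confirm the ServiceMonitor exists, then inspect Prometheus targets locally
kubectl -n monitoring get servicemonitor alertmanager -o yaml
kubectl -n monitoring port-forward svc/prometheus-k8s 9090:9090
# Browse http://localhost:9090/targets and look for the alertmanager job
```
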
-------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "Default", 9 | "name": "0", 10 | "options": { 11 | "path": "/grafana-dashboard-definitions/0" 12 | }, 13 | "orgId": 1, 14 | "type": "file" 15 | } 16 | ] 17 | } 18 | kind: ConfigMap 19 | metadata: 20 | name: grafana-dashboards 21 | namespace: monitoring 22 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/grafana-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | template: 14 | metadata: 15 | annotations: 16 | checksum/grafana-datasources: 48faab41f579fc8efde6034391496f6a 17 | labels: 18 | app: grafana 19 | spec: 20 | containers: 21 | - env: [] 22 | image: grafana/grafana:7.1.0 23 | name: grafana 24 | ports: 25 | - containerPort: 3000 26 | name: http 27 | readinessProbe: 28 | httpGet: 29 | path: /api/health 30 | port: http 31 | resources: 32 | limits: 33 | cpu: 200m 34 | memory: 200Mi 35 | requests: 36 | cpu: 100m 37 | memory: 100Mi 38 | volumeMounts: 39 | - mountPath: /var/lib/grafana 40 | name: grafana-storage 41 | readOnly: false 42 | - mountPath: /etc/grafana/provisioning/datasources 43 | name: grafana-datasources 44 | readOnly: false 45 | - mountPath: /etc/grafana/provisioning/dashboards 46 | name: grafana-dashboards 47 | readOnly: false 48 | - mountPath: /grafana-dashboard-definitions/0/apiserver 49 | name: grafana-dashboard-apiserver 50 | readOnly: false 51 | - mountPath: /grafana-dashboard-definitions/0/cluster-total 52 | name: grafana-dashboard-cluster-total 53 | readOnly: false 54 | - mountPath: /grafana-dashboard-definitions/0/controller-manager 55 | name: grafana-dashboard-controller-manager 56 | readOnly: false 57 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-cluster 58 | name: grafana-dashboard-k8s-resources-cluster 59 | readOnly: false 60 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-namespace 61 | name: grafana-dashboard-k8s-resources-namespace 62 | readOnly: false 63 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-node 64 | name: grafana-dashboard-k8s-resources-node 65 | readOnly: false 66 | - mountPath: 
/grafana-dashboard-definitions/0/k8s-resources-pod 67 | name: grafana-dashboard-k8s-resources-pod 68 | readOnly: false 69 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-workload 70 | name: grafana-dashboard-k8s-resources-workload 71 | readOnly: false 72 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-workloads-namespace 73 | name: grafana-dashboard-k8s-resources-workloads-namespace 74 | readOnly: false 75 | - mountPath: /grafana-dashboard-definitions/0/kubelet 76 | name: grafana-dashboard-kubelet 77 | readOnly: false 78 | - mountPath: /grafana-dashboard-definitions/0/namespace-by-pod 79 | name: grafana-dashboard-namespace-by-pod 80 | readOnly: false 81 | - mountPath: /grafana-dashboard-definitions/0/namespace-by-workload 82 | name: grafana-dashboard-namespace-by-workload 83 | readOnly: false 84 | - mountPath: /grafana-dashboard-definitions/0/node-cluster-rsrc-use 85 | name: grafana-dashboard-node-cluster-rsrc-use 86 | readOnly: false 87 | - mountPath: /grafana-dashboard-definitions/0/node-rsrc-use 88 | name: grafana-dashboard-node-rsrc-use 89 | readOnly: false 90 | - mountPath: /grafana-dashboard-definitions/0/nodes 91 | name: grafana-dashboard-nodes 92 | readOnly: false 93 | - mountPath: /grafana-dashboard-definitions/0/persistentvolumesusage 94 | name: grafana-dashboard-persistentvolumesusage 95 | readOnly: false 96 | - mountPath: /grafana-dashboard-definitions/0/pod-total 97 | name: grafana-dashboard-pod-total 98 | readOnly: false 99 | - mountPath: /grafana-dashboard-definitions/0/prometheus-remote-write 100 | name: grafana-dashboard-prometheus-remote-write 101 | readOnly: false 102 | - mountPath: /grafana-dashboard-definitions/0/prometheus 103 | name: grafana-dashboard-prometheus 104 | readOnly: false 105 | - mountPath: /grafana-dashboard-definitions/0/proxy 106 | name: grafana-dashboard-proxy 107 | readOnly: false 108 | - mountPath: /grafana-dashboard-definitions/0/scheduler 109 | name: grafana-dashboard-scheduler 110 | readOnly: false 111 | - mountPath: /grafana-dashboard-definitions/0/statefulset 112 | name: grafana-dashboard-statefulset 113 | readOnly: false 114 | - mountPath: /grafana-dashboard-definitions/0/workload-total 115 | name: grafana-dashboard-workload-total 116 | readOnly: false 117 | nodeSelector: 118 | beta.kubernetes.io/os: linux 119 | securityContext: 120 | fsGroup: 65534 121 | runAsNonRoot: true 122 | runAsUser: 65534 123 | serviceAccountName: grafana 124 | volumes: 125 | - emptyDir: {} 126 | name: grafana-storage 127 | - name: grafana-datasources 128 | secret: 129 | secretName: grafana-datasources 130 | - configMap: 131 | name: grafana-dashboards 132 | name: grafana-dashboards 133 | - configMap: 134 | name: grafana-dashboard-apiserver 135 | name: grafana-dashboard-apiserver 136 | - configMap: 137 | name: grafana-dashboard-cluster-total 138 | name: grafana-dashboard-cluster-total 139 | - configMap: 140 | name: grafana-dashboard-controller-manager 141 | name: grafana-dashboard-controller-manager 142 | - configMap: 143 | name: grafana-dashboard-k8s-resources-cluster 144 | name: grafana-dashboard-k8s-resources-cluster 145 | - configMap: 146 | name: grafana-dashboard-k8s-resources-namespace 147 | name: grafana-dashboard-k8s-resources-namespace 148 | - configMap: 149 | name: grafana-dashboard-k8s-resources-node 150 | name: grafana-dashboard-k8s-resources-node 151 | - configMap: 152 | name: grafana-dashboard-k8s-resources-pod 153 | name: grafana-dashboard-k8s-resources-pod 154 | - configMap: 155 | name: grafana-dashboard-k8s-resources-workload 
156 | name: grafana-dashboard-k8s-resources-workload 157 | - configMap: 158 | name: grafana-dashboard-k8s-resources-workloads-namespace 159 | name: grafana-dashboard-k8s-resources-workloads-namespace 160 | - configMap: 161 | name: grafana-dashboard-kubelet 162 | name: grafana-dashboard-kubelet 163 | - configMap: 164 | name: grafana-dashboard-namespace-by-pod 165 | name: grafana-dashboard-namespace-by-pod 166 | - configMap: 167 | name: grafana-dashboard-namespace-by-workload 168 | name: grafana-dashboard-namespace-by-workload 169 | - configMap: 170 | name: grafana-dashboard-node-cluster-rsrc-use 171 | name: grafana-dashboard-node-cluster-rsrc-use 172 | - configMap: 173 | name: grafana-dashboard-node-rsrc-use 174 | name: grafana-dashboard-node-rsrc-use 175 | - configMap: 176 | name: grafana-dashboard-nodes 177 | name: grafana-dashboard-nodes 178 | - configMap: 179 | name: grafana-dashboard-persistentvolumesusage 180 | name: grafana-dashboard-persistentvolumesusage 181 | - configMap: 182 | name: grafana-dashboard-pod-total 183 | name: grafana-dashboard-pod-total 184 | - configMap: 185 | name: grafana-dashboard-prometheus-remote-write 186 | name: grafana-dashboard-prometheus-remote-write 187 | - configMap: 188 | name: grafana-dashboard-prometheus 189 | name: grafana-dashboard-prometheus 190 | - configMap: 191 | name: grafana-dashboard-proxy 192 | name: grafana-dashboard-proxy 193 | - configMap: 194 | name: grafana-dashboard-scheduler 195 | name: grafana-dashboard-scheduler 196 | - configMap: 197 | name: grafana-dashboard-statefulset 198 | name: grafana-dashboard-statefulset 199 | - configMap: 200 | name: grafana-dashboard-workload-total 201 | name: grafana-dashboard-workload-total 202 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 3000 12 | targetPort: http 13 | selector: 14 | app: grafana 15 | type: NodePort 16 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - interval: 15s 9 | port: http 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/kube-state-metrics-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: v1.9.7 7 | name: kube-state-metrics 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | - 
secrets 14 | - nodes 15 | - pods 16 | - services 17 | - resourcequotas 18 | - replicationcontrollers 19 | - limitranges 20 | - persistentvolumeclaims 21 | - persistentvolumes 22 | - namespaces 23 | - endpoints 24 | verbs: 25 | - list 26 | - watch 27 | - apiGroups: 28 | - extensions 29 | resources: 30 | - daemonsets 31 | - deployments 32 | - replicasets 33 | - ingresses 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - statefulsets 41 | - daemonsets 42 | - deployments 43 | - replicasets 44 | verbs: 45 | - list 46 | - watch 47 | - apiGroups: 48 | - batch 49 | resources: 50 | - cronjobs 51 | - jobs 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - autoscaling 57 | resources: 58 | - horizontalpodautoscalers 59 | verbs: 60 | - list 61 | - watch 62 | - apiGroups: 63 | - authentication.k8s.io 64 | resources: 65 | - tokenreviews 66 | verbs: 67 | - create 68 | - apiGroups: 69 | - authorization.k8s.io 70 | resources: 71 | - subjectaccessreviews 72 | verbs: 73 | - create 74 | - apiGroups: 75 | - policy 76 | resources: 77 | - poddisruptionbudgets 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - certificates.k8s.io 83 | resources: 84 | - certificatesigningrequests 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - storage.k8s.io 90 | resources: 91 | - storageclasses 92 | - volumeattachments 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - admissionregistration.k8s.io 98 | resources: 99 | - mutatingwebhookconfigurations 100 | - validatingwebhookconfigurations 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - networking.k8s.io 106 | resources: 107 | - networkpolicies 108 | verbs: 109 | - list 110 | - watch 111 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: v1.9.7 7 | name: kube-state-metrics 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: kube-state-metrics 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kube-state-metrics 15 | namespace: monitoring 16 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/kube-state-metrics-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: v1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: kube-state-metrics 14 | template: 15 | metadata: 16 | labels: 17 | app.kubernetes.io/name: kube-state-metrics 18 | app.kubernetes.io/version: v1.9.7 19 | spec: 20 | containers: 21 | - args: 22 | - --host=127.0.0.1 23 | - --port=8081 24 | - --telemetry-host=127.0.0.1 25 | - --telemetry-port=8082 26 | image: quay.io/coreos/kube-state-metrics:v1.9.7 27 | name: kube-state-metrics 28 | - args: 29 | - --logtostderr 30 | - --secure-listen-address=:8443 31 | - 
--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 32 | - --upstream=http://127.0.0.1:8081/ 33 | image: quay.io/brancz/kube-rbac-proxy:v0.6.0 34 | name: kube-rbac-proxy-main 35 | ports: 36 | - containerPort: 8443 37 | name: https-main 38 | securityContext: 39 | runAsUser: 65534 40 | - args: 41 | - --logtostderr 42 | - --secure-listen-address=:9443 43 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 44 | - --upstream=http://127.0.0.1:8082/ 45 | image: quay.io/brancz/kube-rbac-proxy:v0.6.0 46 | name: kube-rbac-proxy-self 47 | ports: 48 | - containerPort: 9443 49 | name: https-self 50 | securityContext: 51 | runAsUser: 65534 52 | nodeSelector: 53 | kubernetes.io/os: linux 54 | serviceAccountName: kube-state-metrics 55 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: v1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: https-main 13 | port: 8443 14 | targetPort: https-main 15 | - name: https-self 16 | port: 9443 17 | targetPort: https-self 18 | selector: 19 | app.kubernetes.io/name: kube-state-metrics 20 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: v1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/kube-state-metrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | honorLabels: true 13 | interval: 30s 14 | port: https-main 15 | relabelings: 16 | - action: labeldrop 17 | regex: (pod|service|endpoint|namespace) 18 | scheme: https 19 | scrapeTimeout: 30s 20 | tlsConfig: 21 | insecureSkipVerify: true 22 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 23 | interval: 30s 24 | port: https-self 25 | scheme: https 26 | tlsConfig: 27 | insecureSkipVerify: true 28 | jobLabel: app.kubernetes.io/name 29 | selector: 30 | matchLabels: 31 | app.kubernetes.io/name: kube-state-metrics 32 | -------------------------------------------------------------------------------- 
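A quick way to sanity-check the kube-state-metrics manifests above (a minimal sketch, assuming kubectl already points at the cluster these files are applied to): the collector binds only to 127.0.0.1:8081/8082, so everything external goes through the two kube-rbac-proxy sidecars on 8443 (https-main) and 9443 (https-self), which require a service-account bearer token.

# Wait for the Deployment defined above to become available.
kubectl -n monitoring rollout status deploy/kube-state-metrics
# The headless Service should list https-main/https-self endpoints per ready pod.
kubectl -n monitoring get endpoints kube-state-metrics
# kube-rbac-proxy rejects unauthenticated requests, so an HTTP 401/403 from a
# bare probe of port 8443 already shows the proxy (not the collector) answering.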
/kubeadm_v1.19.3/prometheus_grafana/manifests/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: node-exporter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: node-exporter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: node-exporter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/node-exporter-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v1.0.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | selector: 11 | matchLabels: 12 | app.kubernetes.io/name: node-exporter 13 | template: 14 | metadata: 15 | labels: 16 | app.kubernetes.io/name: node-exporter 17 | app.kubernetes.io/version: v1.0.1 18 | spec: 19 | containers: 20 | - args: 21 | - --web.listen-address=127.0.0.1:9100 22 | - --path.procfs=/host/proc 23 | - --path.sysfs=/host/sys 24 | - --path.rootfs=/host/root 25 | - --no-collector.wifi 26 | - --no-collector.hwmon 27 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) 28 | image: quay.io/prometheus/node-exporter:v1.0.1 29 | name: node-exporter 30 | resources: 31 | limits: 32 | cpu: 250m 33 | memory: 180Mi 34 | requests: 35 | cpu: 102m 36 | memory: 180Mi 37 | volumeMounts: 38 | - mountPath: /host/proc 39 | mountPropagation: HostToContainer 40 | name: proc 41 | readOnly: true 42 | - mountPath: /host/sys 43 | mountPropagation: HostToContainer 44 | name: sys 45 | readOnly: true 46 | - mountPath: /host/root 47 | mountPropagation: HostToContainer 48 | name: root 49 | readOnly: true 50 | - args: 51 | - --logtostderr 52 | - --secure-listen-address=[$(IP)]:9100 53 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 54 | - --upstream=http://127.0.0.1:9100/ 55 | env: 56 | - name: IP 57 | valueFrom: 58 | fieldRef: 59 | fieldPath: status.podIP 60 | image: quay.io/brancz/kube-rbac-proxy:v0.6.0 61 | name: kube-rbac-proxy 62 | ports: 63 | - containerPort: 9100 64 | hostPort: 9100 65 | name: https 66 | resources: 67 | limits: 68 | cpu: 20m 69 | memory: 40Mi 70 | requests: 71 | cpu: 10m 72 | memory: 20Mi 73 | hostNetwork: true 74 | hostPID: true 75 | nodeSelector: 76 | kubernetes.io/os: linux 77 | securityContext: 78 | runAsNonRoot: true 79 | runAsUser: 65534 80 | serviceAccountName: node-exporter 81 | tolerations: 82 | - operator: Exists 83 | volumes: 84 
| - hostPath: 85 | path: /proc 86 | name: proc 87 | - hostPath: 88 | path: /sys 89 | name: sys 90 | - hostPath: 91 | path: / 92 | name: root 93 | updateStrategy: 94 | rollingUpdate: 95 | maxUnavailable: 10% 96 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v1.0.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: https 13 | port: 9100 14 | targetPort: https 15 | selector: 16 | app.kubernetes.io/name: node-exporter 17 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/node-exporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v1.0.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | interval: 15s 13 | port: https 14 | relabelings: 15 | - action: replace 16 | regex: (.*) 17 | replacement: $1 18 | sourceLabels: 19 | - __meta_kubernetes_pod_node_name 20 | targetLabel: instance 21 | scheme: https 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | jobLabel: app.kubernetes.io/name 25 | selector: 26 | matchLabels: 27 | app.kubernetes.io/name: node-exporter 28 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | group: metrics.k8s.io 7 | groupPriorityMinimum: 100 8 | insecureSkipTLSVerify: true 9 | service: 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | version: v1beta1 13 | versionPriority: 100 14 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-adapter 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes 10 | - namespaces 11 | - pods 12 | - services 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 
| labels: 5 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 6 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | name: system:aggregated-metrics-reader 9 | rules: 10 | - apiGroups: 11 | - metrics.k8s.io 12 | resources: 13 | - pods 14 | - nodes 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-adapter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-adapter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: resource-metrics:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-clusterRoleServerResources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: resource-metrics-server-resources 5 | rules: 6 | - apiGroups: 7 | - metrics.k8s.io 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-configMap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.yaml: |- 4 | "resourceRules": 5 | "cpu": 6 | "containerLabel": "container" 7 | "containerQuery": "sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}[5m])) by (<<.GroupBy>>)" 8 | "nodeQuery": "sum(1 - irate(node_cpu_seconds_total{mode=\"idle\"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)" 9 | "resources": 10 | "overrides": 11 | "namespace": 12 | "resource": "namespace" 13 | "node": 14 | "resource": "node" 15 | "pod": 16 | "resource": "pod" 17 | "memory": 18 | "containerLabel": "container" 19 | "containerQuery": "sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}) by (<<.GroupBy>>)" 20 | "nodeQuery": "sum(node_memory_MemTotal_bytes{job=\"node-exporter\",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job=\"node-exporter\",<<.LabelMatchers>>}) by (<<.GroupBy>>)" 21 | "resources": 22 | "overrides": 23 | "instance": 24 | "resource": "node" 25 | "namespace": 26 | "resource": "namespace" 27 | "pod": 28 | "resource": "pod" 29 | "window": "5m" 30 | kind: ConfigMap 31 | metadata: 32 | name: adapter-config 33 | namespace: monitoring 34 | 
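The resourceRules above are what drive the resource-metrics pipeline: prometheus-adapter (deployed in the next file) evaluates the containerQuery/nodeQuery PromQL against prometheus-k8s and serves the results through the v1beta1.metrics.k8s.io APIService registered earlier. A minimal sketch of exercising it, assuming the whole manifests directory has been applied and the adapter pod is ready:

# Resource metrics served by prometheus-adapter back `kubectl top`.
kubectl top nodes
kubectl top pods -n monitoring
# Or query the aggregated metrics API directly.
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes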
-------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: prometheus-adapter 11 | strategy: 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | template: 16 | metadata: 17 | labels: 18 | name: prometheus-adapter 19 | spec: 20 | containers: 21 | - args: 22 | - --cert-dir=/var/run/serving-cert 23 | - --config=/etc/adapter/config.yaml 24 | - --logtostderr=true 25 | - --metrics-relist-interval=1m 26 | - --prometheus-url=http://prometheus-k8s.monitoring.svc.cluster.local:9090/ 27 | - --secure-port=6443 28 | image: directxman12/k8s-prometheus-adapter:v0.8.0 29 | name: prometheus-adapter 30 | ports: 31 | - containerPort: 6443 32 | volumeMounts: 33 | - mountPath: /tmp 34 | name: tmpfs 35 | readOnly: false 36 | - mountPath: /var/run/serving-cert 37 | name: volume-serving-cert 38 | readOnly: false 39 | - mountPath: /etc/adapter 40 | name: config 41 | readOnly: false 42 | nodeSelector: 43 | kubernetes.io/os: linux 44 | serviceAccountName: prometheus-adapter 45 | volumes: 46 | - emptyDir: {} 47 | name: tmpfs 48 | - emptyDir: {} 49 | name: volume-serving-cert 50 | - configMap: 51 | name: adapter-config 52 | name: config 53 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: resource-metrics-auth-reader 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: extension-apiserver-authentication-reader 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-adapter 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 6443 13 | selector: 14 | name: prometheus-adapter 15 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-adapter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | 
interval: 30s 12 | port: https 13 | scheme: https 14 | tlsConfig: 15 | insecureSkipVerify: true 16 | selector: 17 | matchLabels: 18 | name: prometheus-adapter 19 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.43.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | honorLabels: true 14 | port: https 15 | scheme: https 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: controller 21 | app.kubernetes.io/name: prometheus-operator 22 | app.kubernetes.io/version: v0.43.0 23 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: k8s 7 | namespace: monitoring 8 | spec: 9 | alerting: 10 | alertmanagers: 11 | - name: alertmanager-main 12 | namespace: monitoring 13 | port: web 14 | image: quay.io/prometheus/prometheus:v2.20.0 15 | nodeSelector: 16 | kubernetes.io/os: linux 17 | podMonitorNamespaceSelector: {} 18 | podMonitorSelector: {} 19 | probeNamespaceSelector: {} 20 | probeSelector: {} 21 | replicas: 2 22 | resources: 23 | requests: 24 | memory: 400Mi 25 | ruleSelector: 26 | matchLabels: 27 | prometheus: k8s 28 | role: alert-rules 29 | securityContext: 30 | fsGroup: 2000 31 | runAsNonRoot: true 32 | runAsUser: 1000 33 | serviceAccountName: prometheus-k8s 34 | serviceMonitorNamespaceSelector: {} 35 | serviceMonitorSelector: {} 36 | version: v2.20.0 37 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 
| name: prometheus-k8s-config 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: prometheus-k8s-config 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-k8s 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-roleBindingSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: prometheus-k8s 12 | subjects: 13 | - kind: ServiceAccount 14 | name: prometheus-k8s 15 | namespace: monitoring 16 | - apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | name: prometheus-k8s 20 | namespace: kube-system 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: Role 24 | name: prometheus-k8s 25 | subjects: 26 | - kind: ServiceAccount 27 | name: prometheus-k8s 28 | namespace: monitoring 29 | - apiVersion: rbac.authorization.k8s.io/v1 30 | kind: RoleBinding 31 | metadata: 32 | name: prometheus-k8s 33 | namespace: monitoring 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: Role 37 | name: prometheus-k8s 38 | subjects: 39 | - kind: ServiceAccount 40 | name: prometheus-k8s 41 | namespace: monitoring 42 | kind: RoleBindingList 43 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-roleSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - services 13 | - endpoints 14 | - pods 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiGroups: 20 | - extensions 21 | resources: 22 | - ingresses 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - apiVersion: rbac.authorization.k8s.io/v1 28 | kind: Role 29 | metadata: 30 | name: prometheus-k8s 31 | namespace: kube-system 32 | rules: 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - services 37 | - endpoints 38 | - pods 39 | verbs: 40 | - get 41 | - list 42 | - watch 43 | - apiGroups: 44 | - extensions 45 | resources: 46 | - ingresses 47 | verbs: 48 | - get 49 | - list 50 | - watch 51 | - apiVersion: rbac.authorization.k8s.io/v1 52 | kind: Role 53 | metadata: 54 | name: prometheus-k8s 55 | namespace: monitoring 56 | rules: 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - services 61 | - endpoints 62 | - pods 63 | verbs: 64 | - get 65 | - list 66 | - watch 67 | - apiGroups: 68 | - extensions 69 | resources: 70 | - ingresses 71 | verbs: 72 | - get 73 | - list 74 | - watch 75 | kind: 
RoleList 76 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceMonitorApiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: apiserver 6 | name: kube-apiserver 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | metricRelabelings: 13 | - action: drop 14 | regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) 15 | sourceLabels: 16 | - __name__ 17 | - action: drop 18 | regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) 19 | sourceLabels: 20 | - __name__ 21 | - action: drop 22 | regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) 23 | sourceLabels: 24 | - __name__ 25 | - action: drop 26 | regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) 27 | sourceLabels: 28 | - __name__ 29 | - action: drop 30 | regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) 31 | sourceLabels: 32 | - __name__ 33 | - action: drop 34 | 
regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) 35 | sourceLabels: 36 | - __name__ 37 | - action: drop 38 | regex: transformation_(transformation_latencies_microseconds|failures_total) 39 | sourceLabels: 40 | - __name__ 41 | - action: drop 42 | regex: (admission_quota_controller_adds|crd_autoregistration_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|AvailableConditionController_retries|crd_openapi_controller_unfinished_work_seconds|APIServiceRegistrationController_retries|admission_quota_controller_longest_running_processor_microseconds|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_unfinished_work_seconds|crd_openapi_controller_adds|crd_autoregistration_controller_retries|crd_finalizer_queue_latency|AvailableConditionController_work_duration|non_structural_schema_condition_controller_depth|crd_autoregistration_controller_unfinished_work_seconds|AvailableConditionController_adds|DiscoveryController_longest_running_processor_microseconds|autoregister_queue_latency|crd_autoregistration_controller_adds|non_structural_schema_condition_controller_work_duration|APIServiceRegistrationController_adds|crd_finalizer_work_duration|crd_naming_condition_controller_unfinished_work_seconds|crd_openapi_controller_longest_running_processor_microseconds|DiscoveryController_adds|crd_autoregistration_controller_longest_running_processor_microseconds|autoregister_unfinished_work_seconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|non_structural_schema_condition_controller_queue_latency|crd_naming_condition_controller_depth|AvailableConditionController_longest_running_processor_microseconds|crdEstablishing_depth|crd_finalizer_longest_running_processor_microseconds|crd_naming_condition_controller_adds|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_unfinished_work_seconds|crd_openapi_controller_depth|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|DiscoveryController_work_duration|autoregister_adds|crd_autoregistration_controller_queue_latency|crd_finalizer_retries|AvailableConditionController_unfinished_work_seconds|autoregister_longest_running_processor_microseconds|non_structural_schema_condition_controller_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_depth|AvailableConditionController_depth|DiscoveryController_retries|admission_quota_controller_depth|crdEstablishing_adds|APIServiceOpenAPIAggregationControllerQueue1_retries|crdEstablishing_queue_latency|non_structural_schema_condition_controller_longest_running_processor_microseconds|autoregister_work_duration|crd_openapi_controller_retries|APIServiceRegistrationController_work_duration|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_openapi_controller_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_queue_latency|crd_autoregistration_controller_depth|AvailableConditionController_queue_latency|admission_quota_controller_queue_latency|crd_naming_condition_controller_work_duration|crd_openapi_controller_work_duration|DiscoveryController_depth|crd_naming_condition_controller_longest_running_processor_microseconds|APIServiceRegistrationController_depth|APIServiceRegi
strationController_longest_running_processor_microseconds|crd_finalizer_unfinished_work_seconds|crdEstablishing_retries|admission_quota_controller_unfinished_work_seconds|non_structural_schema_condition_controller_adds|APIServiceRegistrationController_unfinished_work_seconds|admission_quota_controller_work_duration|autoregister_depth|autoregister_retries|kubeproxy_sync_proxy_rules_latency_microseconds|rest_client_request_latency_seconds|non_structural_schema_condition_controller_retries) 43 | sourceLabels: 44 | - __name__ 45 | - action: drop 46 | regex: etcd_(debugging|disk|server).* 47 | sourceLabels: 48 | - __name__ 49 | - action: drop 50 | regex: apiserver_admission_controller_admission_latencies_seconds_.* 51 | sourceLabels: 52 | - __name__ 53 | - action: drop 54 | regex: apiserver_admission_step_admission_latencies_seconds_.* 55 | sourceLabels: 56 | - __name__ 57 | - action: drop 58 | regex: apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50) 59 | sourceLabels: 60 | - __name__ 61 | - le 62 | port: https 63 | scheme: https 64 | tlsConfig: 65 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 66 | serverName: kubernetes 67 | jobLabel: component 68 | namespaceSelector: 69 | matchNames: 70 | - default 71 | selector: 72 | matchLabels: 73 | component: apiserver 74 | provider: kubernetes 75 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: coredns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceMonitorKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | metricRelabelings: 13 | - action: drop 14 | regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) 15 | sourceLabels: 16 | - __name__ 17 | - action: drop 18 | regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) 19 | sourceLabels: 20 | - __name__ 21 | - 
action: drop 22 | regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) 23 | sourceLabels: 24 | - __name__ 25 | - action: drop 26 | regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) 27 | sourceLabels: 28 | - __name__ 29 | - action: drop 30 | regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) 31 | sourceLabels: 32 | - __name__ 33 | - action: drop 34 | regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) 35 | sourceLabels: 36 | - __name__ 37 | - action: drop 38 | regex: transformation_(transformation_latencies_microseconds|failures_total) 39 | sourceLabels: 40 | - __name__ 41 | - action: drop 42 | regex: (admission_quota_controller_adds|crd_autoregistration_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|AvailableConditionController_retries|crd_openapi_controller_unfinished_work_seconds|APIServiceRegistrationController_retries|admission_quota_controller_longest_running_processor_microseconds|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_unfinished_work_seconds|crd_openapi_controller_adds|crd_autoregistration_controller_retries|crd_finalizer_queue_latency|AvailableConditionController_work_duration|non_structural_schema_condition_controller_depth|crd_autoregistration_controller_unfinished_work_seconds|AvailableConditionController_adds|DiscoveryController_longest_running_processor_microseconds|autoregister_queue_latency|crd_autoregistration_controller_adds|non_structural_schema_condition_controller_work_duration|APIServiceRegistrationController_adds|crd_finalizer_work_duration|crd_naming_condition_controller_unfinished_work_seconds|crd_openapi_controller_longest_running_processor_microseconds|DiscoveryController_adds|crd_autoregistration_controller_longest_running_processor_microseconds|autoregister_unfinished_work_seconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|non_structural_schema_condition_controller_queue_latency|crd_naming_condition_controller_depth|AvailableConditionController_longest_running_processor_microseconds|crdEstablishing_depth|crd_finalizer_longest_running_processor_microseconds|crd_naming_condition_controller_adds|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_unfinished_work_seconds|crd_openapi_controller_depth|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|DiscoveryController_work_duration|autoregister_adds|crd_autoregistration_controller_queue_latency|crd_finalizer_retries|AvailableConditionController_unfinished_work_seconds|autoregister_longest_running_processor_microseconds|non_structural_schema_condition_controller_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_depth|AvailableConditionController_depth|DiscoveryController_retries|admission_quota_controller_depth|crdEstablishing_adds|APIServiceOpenAPIAggregationControllerQueue1_retries|crdEstablishing_queue_latency|non_structural_schema_condition_controller_longest_
running_processor_microseconds|autoregister_work_duration|crd_openapi_controller_retries|APIServiceRegistrationController_work_duration|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_openapi_controller_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_queue_latency|crd_autoregistration_controller_depth|AvailableConditionController_queue_latency|admission_quota_controller_queue_latency|crd_naming_condition_controller_work_duration|crd_openapi_controller_work_duration|DiscoveryController_depth|crd_naming_condition_controller_longest_running_processor_microseconds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|crd_finalizer_unfinished_work_seconds|crdEstablishing_retries|admission_quota_controller_unfinished_work_seconds|non_structural_schema_condition_controller_adds|APIServiceRegistrationController_unfinished_work_seconds|admission_quota_controller_work_duration|autoregister_depth|autoregister_retries|kubeproxy_sync_proxy_rules_latency_microseconds|rest_client_request_latency_seconds|non_structural_schema_condition_controller_retries) 43 | sourceLabels: 44 | - __name__ 45 | - action: drop 46 | regex: etcd_(debugging|disk|request|server).* 47 | sourceLabels: 48 | - __name__ 49 | port: https-metrics 50 | scheme: https 51 | tlsConfig: 52 | insecureSkipVerify: true 53 | jobLabel: k8s-app 54 | namespaceSelector: 55 | matchNames: 56 | - kube-system 57 | selector: 58 | matchLabels: 59 | k8s-app: kube-controller-manager 60 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https-metrics 13 | scheme: https 14 | tlsConfig: 15 | insecureSkipVerify: true 16 | jobLabel: k8s-app 17 | namespaceSelector: 18 | matchNames: 19 | - kube-system 20 | selector: 21 | matchLabels: 22 | k8s-app: kube-scheduler 23 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/prometheus-serviceMonitorKubelet.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kubelet 6 | name: kubelet 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | honorLabels: true 12 | interval: 30s 13 | metricRelabelings: 14 | - action: drop 15 | regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) 16 | sourceLabels: 17 | - __name__ 18 | - action: drop 19 | regex: 
scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) 20 | sourceLabels: 21 | - __name__ 22 | - action: drop 23 | regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) 24 | sourceLabels: 25 | - __name__ 26 | - action: drop 27 | regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) 28 | sourceLabels: 29 | - __name__ 30 | - action: drop 31 | regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) 32 | sourceLabels: 33 | - __name__ 34 | - action: drop 35 | regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) 36 | sourceLabels: 37 | - __name__ 38 | - action: drop 39 | regex: transformation_(transformation_latencies_microseconds|failures_total) 40 | sourceLabels: 41 | - __name__ 42 | - action: drop 43 | regex: (admission_quota_controller_adds|crd_autoregistration_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|AvailableConditionController_retries|crd_openapi_controller_unfinished_work_seconds|APIServiceRegistrationController_retries|admission_quota_controller_longest_running_processor_microseconds|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_unfinished_work_seconds|crd_openapi_controller_adds|crd_autoregistration_controller_retries|crd_finalizer_queue_latency|AvailableConditionController_work_duration|non_structural_schema_condition_controller_depth|crd_autoregistration_controller_unfinished_work_seconds|AvailableConditionController_adds|DiscoveryController_longest_running_processor_microseconds|autoregister_queue_latency|crd_autoregistration_controller_adds|non_structural_schema_condition_controller_work_duration|APIServiceRegistrationController_adds|crd_finalizer_work_duration|crd_naming_condition_controller_unfinished_work_seconds|crd_openapi_controller_longest_running_processor_microseconds|DiscoveryController_adds|crd_autoregistration_controller_longest_running_processor_microseconds|autoregister_unfinished_work_seconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|non_structural_schema_condition_controller_queue_latency|crd_naming_condition_controller_depth|AvailableConditionController_longest_running_processor_microseconds|crdEstablishing_depth|crd_finalizer_longest_running_processor_microseconds|crd_naming_condition_controller_adds|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_unfinished_work_seconds|crd_openapi_controller_depth|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|DiscoveryController_work_duration|autoregister_adds|crd_autoregistration_controller_queue_latency|crd_finalizer_retries|AvailableConditionController_unfinished_work_seconds|autoregister_longest_running_processor_microseconds|non_structural_schema_condition_controller_unfinished_w
ork_seconds|APIServiceOpenAPIAggregationControllerQueue1_depth|AvailableConditionController_depth|DiscoveryController_retries|admission_quota_controller_depth|crdEstablishing_adds|APIServiceOpenAPIAggregationControllerQueue1_retries|crdEstablishing_queue_latency|non_structural_schema_condition_controller_longest_running_processor_microseconds|autoregister_work_duration|crd_openapi_controller_retries|APIServiceRegistrationController_work_duration|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_openapi_controller_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_queue_latency|crd_autoregistration_controller_depth|AvailableConditionController_queue_latency|admission_quota_controller_queue_latency|crd_naming_condition_controller_work_duration|crd_openapi_controller_work_duration|DiscoveryController_depth|crd_naming_condition_controller_longest_running_processor_microseconds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|crd_finalizer_unfinished_work_seconds|crdEstablishing_retries|admission_quota_controller_unfinished_work_seconds|non_structural_schema_condition_controller_adds|APIServiceRegistrationController_unfinished_work_seconds|admission_quota_controller_work_duration|autoregister_depth|autoregister_retries|kubeproxy_sync_proxy_rules_latency_microseconds|rest_client_request_latency_seconds|non_structural_schema_condition_controller_retries) 44 | sourceLabels: 45 | - __name__ 46 | port: https-metrics 47 | relabelings: 48 | - sourceLabels: 49 | - __metrics_path__ 50 | targetLabel: metrics_path 51 | scheme: https 52 | tlsConfig: 53 | insecureSkipVerify: true 54 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 55 | honorLabels: true 56 | honorTimestamps: false 57 | interval: 30s 58 | metricRelabelings: 59 | - action: drop 60 | regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) 61 | sourceLabels: 62 | - __name__ 63 | path: /metrics/cadvisor 64 | port: https-metrics 65 | relabelings: 66 | - sourceLabels: 67 | - __metrics_path__ 68 | targetLabel: metrics_path 69 | scheme: https 70 | tlsConfig: 71 | insecureSkipVerify: true 72 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 73 | honorLabels: true 74 | interval: 30s 75 | path: /metrics/probes 76 | port: https-metrics 77 | relabelings: 78 | - sourceLabels: 79 | - __metrics_path__ 80 | targetLabel: metrics_path 81 | scheme: https 82 | tlsConfig: 83 | insecureSkipVerify: true 84 | jobLabel: k8s-app 85 | namespaceSelector: 86 | matchNames: 87 | - kube-system 88 | selector: 89 | matchLabels: 90 | k8s-app: kubelet 91 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/0namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-0podmonitorCustomResourceDefinition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.2.4 6 | creationTimestamp: null 7 | name: 
podmonitors.monitoring.coreos.com 8 | spec: 9 | group: monitoring.coreos.com 10 | names: 11 | kind: PodMonitor 12 | listKind: PodMonitorList 13 | plural: podmonitors 14 | singular: podmonitor 15 | scope: Namespaced 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: PodMonitor defines monitoring for a set of pods. 21 | properties: 22 | apiVersion: 23 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 24 | type: string 25 | kind: 26 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 27 | type: string 28 | metadata: 29 | type: object 30 | spec: 31 | description: Specification of desired Pod selection for target discovery by Prometheus. 32 | properties: 33 | jobLabel: 34 | description: The label to use to retrieve the job name from. 35 | type: string 36 | namespaceSelector: 37 | description: Selector to select which namespaces the Endpoints objects are discovered from. 38 | properties: 39 | any: 40 | description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. 41 | type: boolean 42 | matchNames: 43 | description: List of namespace names. 44 | items: 45 | type: string 46 | type: array 47 | type: object 48 | podMetricsEndpoints: 49 | description: A list of endpoints allowed as part of this PodMonitor. 50 | items: 51 | description: PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. 52 | properties: 53 | basicAuth: 54 | description: 'BasicAuth allow an endpoint to authenticate over basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' 55 | properties: 56 | password: 57 | description: The secret in the service monitor namespace that contains the password for authentication. 58 | properties: 59 | key: 60 | description: The key of the secret to select from. Must be a valid secret key. 61 | type: string 62 | name: 63 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 64 | type: string 65 | optional: 66 | description: Specify whether the Secret or its key must be defined 67 | type: boolean 68 | required: 69 | - key 70 | type: object 71 | username: 72 | description: The secret in the service monitor namespace that contains the username for authentication. 73 | properties: 74 | key: 75 | description: The key of the secret to select from. Must be a valid secret key. 76 | type: string 77 | name: 78 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 79 | type: string 80 | optional: 81 | description: Specify whether the Secret or its key must be defined 82 | type: boolean 83 | required: 84 | - key 85 | type: object 86 | type: object 87 | bearerTokenSecret: 88 | description: Secret to mount to read bearer token for scraping targets. 
The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator. 89 | properties: 90 | key: 91 | description: The key of the secret to select from. Must be a valid secret key. 92 | type: string 93 | name: 94 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 95 | type: string 96 | optional: 97 | description: Specify whether the Secret or its key must be defined 98 | type: boolean 99 | required: 100 | - key 101 | type: object 102 | honorLabels: 103 | description: HonorLabels chooses the metric's labels on collisions with target labels. 104 | type: boolean 105 | honorTimestamps: 106 | description: HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. 107 | type: boolean 108 | interval: 109 | description: Interval at which metrics should be scraped 110 | type: string 111 | metricRelabelings: 112 | description: MetricRelabelConfigs to apply to samples before ingestion. 113 | items: 114 | description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' 115 | properties: 116 | action: 117 | description: Action to perform based on regex matching. Default is 'replace' 118 | type: string 119 | modulus: 120 | description: Modulus to take of the hash of the source label values. 121 | format: int64 122 | type: integer 123 | regex: 124 | description: Regular expression against which the extracted value is matched. Default is '(.*)' 125 | type: string 126 | replacement: 127 | description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' 128 | type: string 129 | separator: 130 | description: Separator placed between concatenated source label values. default is ';'. 131 | type: string 132 | sourceLabels: 133 | description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. 134 | items: 135 | type: string 136 | type: array 137 | targetLabel: 138 | description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. 139 | type: string 140 | type: object 141 | type: array 142 | params: 143 | additionalProperties: 144 | items: 145 | type: string 146 | type: array 147 | description: Optional HTTP URL parameters 148 | type: object 149 | path: 150 | description: HTTP path to scrape for metrics. 151 | type: string 152 | port: 153 | description: Name of the pod port this endpoint refers to. Mutually exclusive with targetPort. 154 | type: string 155 | proxyUrl: 156 | description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. 157 | type: string 158 | relabelings: 159 | description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' 160 | items: 161 | description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' 162 | properties: 163 | action: 164 | description: Action to perform based on regex matching. Default is 'replace' 165 | type: string 166 | modulus: 167 | description: Modulus to take of the hash of the source label values. 168 | format: int64 169 | type: integer 170 | regex: 171 | description: Regular expression against which the extracted value is matched. Default is '(.*)' 172 | type: string 173 | replacement: 174 | description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' 175 | type: string 176 | separator: 177 | description: Separator placed between concatenated source label values. default is ';'. 178 | type: string 179 | sourceLabels: 180 | description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. 181 | items: 182 | type: string 183 | type: array 184 | targetLabel: 185 | description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. 186 | type: string 187 | type: object 188 | type: array 189 | scheme: 190 | description: HTTP scheme to use for scraping. 191 | type: string 192 | scrapeTimeout: 193 | description: Timeout after which the scrape is ended 194 | type: string 195 | targetPort: 196 | anyOf: 197 | - type: integer 198 | - type: string 199 | description: 'Deprecated: Use ''port'' instead.' 200 | x-kubernetes-int-or-string: true 201 | tlsConfig: 202 | description: TLS configuration to use when scraping the endpoint. 203 | properties: 204 | ca: 205 | description: Struct containing the CA cert to use for the targets. 206 | properties: 207 | configMap: 208 | description: ConfigMap containing data to use for the targets. 209 | properties: 210 | key: 211 | description: The key to select. 212 | type: string 213 | name: 214 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 215 | type: string 216 | optional: 217 | description: Specify whether the ConfigMap or its key must be defined 218 | type: boolean 219 | required: 220 | - key 221 | type: object 222 | secret: 223 | description: Secret containing data to use for the targets. 224 | properties: 225 | key: 226 | description: The key of the secret to select from. Must be a valid secret key. 227 | type: string 228 | name: 229 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 230 | type: string 231 | optional: 232 | description: Specify whether the Secret or its key must be defined 233 | type: boolean 234 | required: 235 | - key 236 | type: object 237 | type: object 238 | cert: 239 | description: Struct containing the client cert file for the targets. 240 | properties: 241 | configMap: 242 | description: ConfigMap containing data to use for the targets. 243 | properties: 244 | key: 245 | description: The key to select. 246 | type: string 247 | name: 248 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' 249 | type: string 250 | optional: 251 | description: Specify whether the ConfigMap or its key must be defined 252 | type: boolean 253 | required: 254 | - key 255 | type: object 256 | secret: 257 | description: Secret containing data to use for the targets. 258 | properties: 259 | key: 260 | description: The key of the secret to select from. Must be a valid secret key. 261 | type: string 262 | name: 263 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 264 | type: string 265 | optional: 266 | description: Specify whether the Secret or its key must be defined 267 | type: boolean 268 | required: 269 | - key 270 | type: object 271 | type: object 272 | insecureSkipVerify: 273 | description: Disable target certificate validation. 274 | type: boolean 275 | keySecret: 276 | description: Secret containing the client key file for the targets. 277 | properties: 278 | key: 279 | description: The key of the secret to select from. Must be a valid secret key. 280 | type: string 281 | name: 282 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 283 | type: string 284 | optional: 285 | description: Specify whether the Secret or its key must be defined 286 | type: boolean 287 | required: 288 | - key 289 | type: object 290 | serverName: 291 | description: Used to verify the hostname for the targets. 292 | type: string 293 | type: object 294 | type: object 295 | type: array 296 | podTargetLabels: 297 | description: PodTargetLabels transfers labels on the Kubernetes Pod onto the target. 298 | items: 299 | type: string 300 | type: array 301 | sampleLimit: 302 | description: SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. 303 | format: int64 304 | type: integer 305 | selector: 306 | description: Selector to select Pod objects. 307 | properties: 308 | matchExpressions: 309 | description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 310 | items: 311 | description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 312 | properties: 313 | key: 314 | description: key is the label key that the selector applies to. 315 | type: string 316 | operator: 317 | description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 318 | type: string 319 | values: 320 | description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 321 | items: 322 | type: string 323 | type: array 324 | required: 325 | - key 326 | - operator 327 | type: object 328 | type: array 329 | matchLabels: 330 | additionalProperties: 331 | type: string 332 | description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
333 | type: object 334 | type: object 335 | targetLimit: 336 | description: TargetLimit defines a limit on the number of scraped targets that will be accepted. 337 | format: int64 338 | type: integer 339 | required: 340 | - podMetricsEndpoints 341 | - selector 342 | type: object 343 | required: 344 | - spec 345 | type: object 346 | served: true 347 | storage: true 348 | status: 349 | acceptedNames: 350 | kind: "" 351 | plural: "" 352 | conditions: [] 353 | storedVersions: [] 354 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-0probeCustomResourceDefinition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.2.4 6 | creationTimestamp: null 7 | name: probes.monitoring.coreos.com 8 | spec: 9 | group: monitoring.coreos.com 10 | names: 11 | kind: Probe 12 | listKind: ProbeList 13 | plural: probes 14 | singular: probe 15 | scope: Namespaced 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: Probe defines monitoring for a set of static targets or ingresses. 21 | properties: 22 | apiVersion: 23 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 24 | type: string 25 | kind: 26 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 27 | type: string 28 | metadata: 29 | type: object 30 | spec: 31 | description: Specification of desired Ingress selection for target discovery by Prometheus. 32 | properties: 33 | interval: 34 | description: Interval at which targets are probed using the configured prober. If not specified Prometheus' global scrape interval is used. 35 | type: string 36 | jobName: 37 | description: The job name assigned to scraped metrics by default. 38 | type: string 39 | module: 40 | description: 'The module to use for probing specifying how to probe the target. Example module configuring in the blackbox exporter: https://github.com/prometheus/blackbox_exporter/blob/master/example.yml' 41 | type: string 42 | prober: 43 | description: Specification for the prober to use for probing targets. The prober.URL parameter is required. Targets cannot be probed if left empty. 44 | properties: 45 | path: 46 | description: Path to collect metrics from. Defaults to `/probe`. 47 | type: string 48 | scheme: 49 | description: HTTP scheme to use for scraping. Defaults to `http`. 50 | type: string 51 | url: 52 | description: Mandatory URL of the prober. 53 | type: string 54 | required: 55 | - url 56 | type: object 57 | scrapeTimeout: 58 | description: Timeout for scraping metrics from the Prometheus exporter. 59 | type: string 60 | targets: 61 | description: Targets defines a set of static and/or dynamically discovered targets to be probed using the prober. 
62 | properties: 63 | ingress: 64 | description: Ingress defines the set of dynamically discovered ingress objects which hosts are considered for probing. 65 | properties: 66 | namespaceSelector: 67 | description: Select Ingress objects by namespace. 68 | properties: 69 | any: 70 | description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. 71 | type: boolean 72 | matchNames: 73 | description: List of namespace names. 74 | items: 75 | type: string 76 | type: array 77 | type: object 78 | relabelingConfigs: 79 | description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' 80 | items: 81 | description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' 82 | properties: 83 | action: 84 | description: Action to perform based on regex matching. Default is 'replace' 85 | type: string 86 | modulus: 87 | description: Modulus to take of the hash of the source label values. 88 | format: int64 89 | type: integer 90 | regex: 91 | description: Regular expression against which the extracted value is matched. Default is '(.*)' 92 | type: string 93 | replacement: 94 | description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' 95 | type: string 96 | separator: 97 | description: Separator placed between concatenated source label values. default is ';'. 98 | type: string 99 | sourceLabels: 100 | description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. 101 | items: 102 | type: string 103 | type: array 104 | targetLabel: 105 | description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. 106 | type: string 107 | type: object 108 | type: array 109 | selector: 110 | description: Select Ingress objects by labels. 111 | properties: 112 | matchExpressions: 113 | description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 114 | items: 115 | description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 116 | properties: 117 | key: 118 | description: key is the label key that the selector applies to. 119 | type: string 120 | operator: 121 | description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 122 | type: string 123 | values: 124 | description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 125 | items: 126 | type: string 127 | type: array 128 | required: 129 | - key 130 | - operator 131 | type: object 132 | type: array 133 | matchLabels: 134 | additionalProperties: 135 | type: string 136 | description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 137 | type: object 138 | type: object 139 | type: object 140 | staticConfig: 141 | description: 'StaticConfig defines static targets which are considered for probing. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.' 142 | properties: 143 | labels: 144 | additionalProperties: 145 | type: string 146 | description: Labels assigned to all metrics scraped from the targets. 147 | type: object 148 | static: 149 | description: Targets is a list of URLs to probe using the configured prober. 150 | items: 151 | type: string 152 | type: array 153 | type: object 154 | type: object 155 | type: object 156 | required: 157 | - spec 158 | type: object 159 | served: true 160 | storage: true 161 | status: 162 | acceptedNames: 163 | kind: "" 164 | plural: "" 165 | conditions: [] 166 | storedVersions: [] 167 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.2.4 6 | creationTimestamp: null 7 | name: prometheusrules.monitoring.coreos.com 8 | spec: 9 | group: monitoring.coreos.com 10 | names: 11 | kind: PrometheusRule 12 | listKind: PrometheusRuleList 13 | plural: prometheusrules 14 | singular: prometheusrule 15 | scope: Namespaced 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: PrometheusRule defines alerting rules for a Prometheus instance 21 | properties: 22 | apiVersion: 23 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 24 | type: string 25 | kind: 26 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 27 | type: string 28 | metadata: 29 | type: object 30 | spec: 31 | description: Specification of desired alerting rule definitions for Prometheus. 32 | properties: 33 | groups: 34 | description: Content of Prometheus rule file 35 | items: 36 | description: 'RuleGroup is a list of sequentially evaluated recording and alerting rules. Note: PartialResponseStrategy is only used by ThanosRuler and will be ignored by Prometheus instances. Valid values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' 37 | properties: 38 | interval: 39 | type: string 40 | name: 41 | type: string 42 | partial_response_strategy: 43 | type: string 44 | rules: 45 | items: 46 | description: Rule describes an alerting or recording rule.
47 | properties: 48 | alert: 49 | type: string 50 | annotations: 51 | additionalProperties: 52 | type: string 53 | type: object 54 | expr: 55 | anyOf: 56 | - type: integer 57 | - type: string 58 | x-kubernetes-int-or-string: true 59 | for: 60 | type: string 61 | labels: 62 | additionalProperties: 63 | type: string 64 | type: object 65 | record: 66 | type: string 67 | required: 68 | - expr 69 | type: object 70 | type: array 71 | required: 72 | - name 73 | - rules 74 | type: object 75 | type: array 76 | type: object 77 | required: 78 | - spec 79 | type: object 80 | served: true 81 | storage: true 82 | status: 83 | acceptedNames: 84 | kind: "" 85 | plural: "" 86 | conditions: [] 87 | storedVersions: [] 88 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.43.0 8 | name: prometheus-operator 9 | rules: 10 | - apiGroups: 11 | - monitoring.coreos.com 12 | resources: 13 | - alertmanagers 14 | - alertmanagers/finalizers 15 | - alertmanagerconfigs 16 | - prometheuses 17 | - prometheuses/finalizers 18 | - thanosrulers 19 | - thanosrulers/finalizers 20 | - servicemonitors 21 | - podmonitors 22 | - probes 23 | - prometheusrules 24 | verbs: 25 | - '*' 26 | - apiGroups: 27 | - apps 28 | resources: 29 | - statefulsets 30 | verbs: 31 | - '*' 32 | - apiGroups: 33 | - "" 34 | resources: 35 | - configmaps 36 | - secrets 37 | verbs: 38 | - '*' 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - pods 43 | verbs: 44 | - list 45 | - delete 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - services 50 | - services/finalizers 51 | - endpoints 52 | verbs: 53 | - get 54 | - create 55 | - update 56 | - delete 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - nodes 61 | verbs: 62 | - list 63 | - watch 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - namespaces 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - apiGroups: 73 | - networking.k8s.io 74 | resources: 75 | - ingresses 76 | verbs: 77 | - get 78 | - list 79 | - watch 80 | - apiGroups: 81 | - authentication.k8s.io 82 | resources: 83 | - tokenreviews 84 | verbs: 85 | - create 86 | - apiGroups: 87 | - authorization.k8s.io 88 | resources: 89 | - subjectaccessreviews 90 | verbs: 91 | - create 92 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.43.0 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-deployment.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.43.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/component: controller 15 | app.kubernetes.io/name: prometheus-operator 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/component: controller 20 | app.kubernetes.io/name: prometheus-operator 21 | app.kubernetes.io/version: v0.43.0 22 | spec: 23 | containers: 24 | - args: 25 | - --kubelet-service=kube-system/kubelet 26 | - --logtostderr=true 27 | - --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.43.0 28 | image: quay.io/prometheus-operator/prometheus-operator:v0.43.0 29 | name: prometheus-operator 30 | ports: 31 | - containerPort: 8080 32 | name: http 33 | resources: 34 | limits: 35 | cpu: 200m 36 | memory: 200Mi 37 | requests: 38 | cpu: 100m 39 | memory: 100Mi 40 | securityContext: 41 | allowPrivilegeEscalation: false 42 | - args: 43 | - --logtostderr 44 | - --secure-listen-address=:8443 45 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 46 | - --upstream=http://127.0.0.1:8080/ 47 | image: quay.io/brancz/kube-rbac-proxy:v0.6.0 48 | name: kube-rbac-proxy 49 | ports: 50 | - containerPort: 8443 51 | name: https 52 | securityContext: 53 | runAsUser: 65534 54 | nodeSelector: 55 | beta.kubernetes.io/os: linux 56 | securityContext: 57 | runAsNonRoot: true 58 | runAsUser: 65534 59 | serviceAccountName: prometheus-operator 60 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.43.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: https 14 | port: 8443 15 | targetPort: https 16 | selector: 17 | app.kubernetes.io/component: controller 18 | app.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/prometheus_grafana/manifests/setup/prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.43.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /kubeadm_v1.19.3/proxy/docker/http-proxy.conf.bak: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="HTTP_PROXY=http://172.31.240.127:3128" 3 | Environment="HTTPS_PROXY=http://172.31.240.127:3128" 4 | 
Environment="NO_PROXY=127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" -------------------------------------------------------------------------------- /kubeadm_v1.19.3/proxy/docker/unuse_docker_proxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /etc/systemd/system/docker.service.d 4 | 5 | rm http-proxy.conf 6 | 7 | systemctl daemon-reload 8 | systemctl restart docker -------------------------------------------------------------------------------- /kubeadm_v1.19.3/proxy/docker/use_docker_proxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /etc/systemd/system/docker.service.d 4 | 5 | cp http-proxy.conf.bak http-proxy.conf 6 | 7 | systemctl daemon-reload 8 | systemctl restart docker -------------------------------------------------------------------------------- /os/init_virtual_machine.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | HOST_NAME="$1" 3 | IP="$2" 4 | 5 | # Set hostname 6 | hostnamectl set-hostname "${HOST_NAME}" 7 | 8 | # Set static IP 9 | sed -i "s/ONBOOT=no/ONBOOT=yes/g" /etc/sysconfig/network-scripts/ifcfg-ens33 10 | sed -i "s/IPADDR=192.168.37.100/IPADDR=${IP}/g" /etc/sysconfig/network-scripts/ifcfg-ens33 11 | systemctl restart network 12 | 13 | ip addr show 14 | 15 | -------------------------------------------------------------------------------- /os/use_centos_yum.sh: -------------------------------------------------------------------------------- 1 | # https://www.cnblogs.com/syqlp/p/6555524.html 2 | 3 | rpm -qa |grep yum 4 | rpm -qa|grep yum|xargs rpm -e --nodeps 5 | rpm -qa|grep python-urlgrabber|xargs rpm -e --nodeps 6 | rpm -qa |grep yum 7 | 8 | # Check detailed rpm version in mirror: 9 | # https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/ 10 | 11 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/yum-metadata-parser-1.1.4-10.el7.x86_64.rpm 12 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/yum-3.4.3-158.el7.centos.noarch.rpm 13 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/yum-plugin-fastestmirror-1.1.31-45.el7.noarch.rpm 14 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/yum-utils-1.1.31-45.el7.noarch.rpm 15 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python-urlgrabber-3.10-8.el7.noarch.rpm 16 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python-kitchen-1.1.1-5.el7.noarch.rpm 17 | wget https://mirrors.aliyun.com/centos/7/os/x86_64/Packages/python-chardet-2.2.1-1.el7_1.noarch.rpm 18 | 19 | rpm -ivh python-chardet-2.2.1-1.el7_1.noarch.rpm 20 | rpm -ivh python-kitchen-1.1.1-5.el7.noarch.rpm 21 | rpm -ivh python-urlgrabber-3.10-8.el7.noarch.rpm 22 | 23 | rpm -ivh yum-utils-1.1.31-45.el7.noarch.rpm yum-metadata-parser-1.1.4-10.el7.x86_64.rpm yum-3.4.3-158.el7.centos.noarch.rpm yum-plugin-fastestmirror-1.1.31-45.el7.noarch.rpm 24 | 25 | 26 | 27 | --------------------------------------------------------------------------------
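For reference, the PodMonitor and Probe CRDs installed by the setup manifests above define the schemas consumed by the prometheus-operator Deployment. A minimal PodMonitor object that satisfies the schema's two required fields (selector and podMetricsEndpoints) might look like the following sketch; the object name, namespace, port name, and labels are hypothetical and not part of this repository:

apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: example-app            # hypothetical name
  namespace: monitoring
spec:
  selector:                    # required: selects the pods to scrape
    matchLabels:
      app: example-app         # hypothetical pod label
  podMetricsEndpoints:         # required: at least one scrape endpoint
  - port: metrics              # pod port *name* (targetPort is deprecated)
    path: /metrics
    interval: 30s

Similarly, a Probe exercising the staticConfig branch of the schema could be sketched as follows; the prober address and module name are assumptions, not values shipped in this repository:

apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: example-probe          # hypothetical name
  namespace: monitoring
spec:
  prober:
    url: blackbox-exporter.monitoring.svc:19115   # assumed blackbox-exporter address
  module: http_2xx             # module assumed to exist in the exporter config
  targets:
    staticConfig:
      static:
      - http://example.com     # URL to probe
  interval: 60s

Once applied with kubectl apply -f, the operator renders such objects into scrape configuration for any Prometheus instance whose podMonitorSelector or probeSelector matches them.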