├── pack
│   ├── README.md
│   └── downall.sh
├── config
│   ├── Kwork
│   │   ├── Kdocker
│   │   │   └── docker-daemon.json
│   │   ├── Kproxy
│   │   │   ├── kube-proxy-csr.json
│   │   │   └── kube-proxy.service
│   │   └── Kkubelet
│   │       ├── kubelet.service.template
│   │       ├── kubelet.config.json.template
│   │       └── csr-crb.yaml
│   ├── Kcsh
│   │   ├── hosts
│   │   └── kubernetes.conf
│   ├── Kzs
│   │   ├── ca-csr.json
│   │   └── ca-config.json
│   ├── Kmaster
│   │   ├── Kapi
│   │   │   ├── encryption-config.yaml
│   │   │   └── kubernetes-csr.json
│   │   ├── Kscheduler
│   │   │   ├── kube-scheduler-csr.json
│   │   │   └── kube-scheduler.service
│   │   ├── Kmanage
│   │   │   └── kube-controller-manager-csr.json
│   │   └── Kha
│   │       └── haproxy.cfg
│   ├── Knet
│   │   └── flanneld-csr.json
│   ├── Kctl
│   │   └── admin-csr.json
│   ├── Ketcd
│   │   └── etcd-csr.json
│   └── environment.sh
├── README.md
└── magic.sh

/pack/README.md:
--------------------------------------------------------------------------------
1 | # You can run this script directly to download all of the required packages, but some of them may come down slowly because of network issues; in that case, download them from the Baidu Cloud archive I have uploaded, or paste the links into Thunder (Xunlei) and download them there.
2 | 
--------------------------------------------------------------------------------
/config/Kwork/Kdocker/docker-daemon.json:
--------------------------------------------------------------------------------
1 | {
2 |   "registry-mirrors": ["https://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"],
3 |   "max-concurrent-downloads": 20
4 | }
5 | 
--------------------------------------------------------------------------------
/config/Kcsh/hosts:
--------------------------------------------------------------------------------
1 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
2 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
3 | 192.168.111.3 kube-node1
4 | 192.168.111.4 kube-node2
5 | 192.168.111.5 kube-node3
6 | 
--------------------------------------------------------------------------------
/config/Kzs/ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 |   "CN": "kubernetes",
3 |   "key": {
4 |     "algo": "rsa",
5 |     "size": 2048
6 |   },
7 |   "names": [
8 |     {
9 |       "C": "CN",
10 |       "ST": "BeiJing",
11 |       "L": "BeiJing",
12 |       "O": "k8s",
13 |       "OU": "4Paradigm"
14 |     }
15 |   ]
16 | }
17 | 
--------------------------------------------------------------------------------
/config/Kmaster/Kapi/encryption-config.yaml:
--------------------------------------------------------------------------------
1 | kind: EncryptionConfig
2 | apiVersion: v1
3 | resources:
4 |   - resources:
5 |       - secrets
6 |     providers:
7 |       - aescbc:
8 |           keys:
9 |             - name: key1
10 |               secret: SYJsryNjAKnZU69Hl+TLhvBT2Ta4kM8MxXwCK9fB438=
11 |       - identity: {}
12 | 
--------------------------------------------------------------------------------
/config/Kwork/Kproxy/kube-proxy-csr.json:
--------------------------------------------------------------------------------
1 | {
2 |   "CN": "system:kube-proxy",
3 |   "key": {
4 |     "algo": "rsa",
5 |     "size": 2048
6 |   },
7 |   "names": [
8 |     {
9 |       "C": "CN",
10 |       "ST": "BeiJing",
11 |       "L": "BeiJing",
12 |       "O": "k8s",
13 |       "OU": "4Paradigm"
14 |     }
15 |   ]
16 | }
17 | 
--------------------------------------------------------------------------------
/config/Knet/flanneld-csr.json:
--------------------------------------------------------------------------------
1 | {
2 |   "CN": "flanneld",
3 |   "hosts": [],
4 |   "key": {
5 |     "algo": "rsa",
6 |     "size": 2048
7 |   },
8 |   "names": [
9 |     {
10 |       "C": "CN",
11 |       "ST": "BeiJing",
12 |       "L": "BeiJing",
13 |       "O": "k8s",
14 |       "OU": "4Paradigm"
15 |     }
16 |   ]
17 | }
18 | 
--------------------------------------------------------------------------------
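The CSR JSON files above (ca-csr.json, kube-proxy-csr.json, flanneld-csr.json, and the others later in this dump) are not read by Kubernetes itself; they are inputs for cfssl. As a point of orientation, a minimal sketch of how magic.sh consumes them later on (these are the same commands that appear in its Kzs and Knet steps; the `-bare` prefix determines the output file names):

```
# generate the self-signed cluster CA from config/Kzs/ca-csr.json
cfssl gencert -initca ca-csr.json | cfssljson -bare ca          # -> ca.pem, ca-key.pem, ca.csr

# sign a component certificate (flanneld as an example) with that CA,
# using the "kubernetes" profile defined in ca-config.json
cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \
  -ca-key=/etc/kubernetes/cert/ca-key.pem \
  -config=/etc/kubernetes/cert/ca-config.json \
  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld   # -> flanneld.pem, flanneld-key.pem
```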
/config/Kctl/admin-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "admin", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "system:masters", 14 | "OU": "4Paradigm" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /config/Kzs/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "87600h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth" 13 | ], 14 | "expiry": "87600h" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /config/Kcsh/kubernetes.conf: -------------------------------------------------------------------------------- 1 | net.bridge.bridge-nf-call-iptables=1 2 | net.bridge.bridge-nf-call-ip6tables=1 3 | net.ipv4.ip_forward=1 4 | net.ipv4.tcp_tw_recycle=0 5 | vm.swappiness=0 6 | vm.overcommit_memory=1 7 | vm.panic_on_oom=0 8 | fs.inotify.max_user_watches=89100 9 | fs.file-max=52706963 10 | fs.nr_open=52706963 11 | net.ipv6.conf.all.disable_ipv6=1 12 | net.netfilter.nf_conntrack_max=2310720 13 | -------------------------------------------------------------------------------- /config/Ketcd/etcd-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "etcd", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "192.168.111.3", 6 | "192.168.111.4", 7 | "192.168.111.5" 8 | ], 9 | "key": { 10 | "algo": "rsa", 11 | "size": 2048 12 | }, 13 | "names": [ 14 | { 15 | "C": "CN", 16 | "ST": "BeiJing", 17 | "L": "BeiJing", 18 | "O": "k8s", 19 | "OU": "4Paradigm" 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /config/Kmaster/Kscheduler/kube-scheduler-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-scheduler", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "192.168.111.3", 6 | "192.168.111.4", 7 | "192.168.111.5" 8 | ], 9 | "key": { 10 | "algo": "rsa", 11 | "size": 2048 12 | }, 13 | "names": [ 14 | { 15 | "C": "CN", 16 | "ST": "BeiJing", 17 | "L": "BeiJing", 18 | "O": "system:kube-scheduler", 19 | "OU": "4Paradigm" 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /config/Kmaster/Kmanage/kube-controller-manager-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-controller-manager", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "hosts": [ 8 | "127.0.0.1", 9 | "192.168.111.3", 10 | "192.168.111.4", 11 | "192.168.111.5" 12 | ], 13 | "names": [ 14 | { 15 | "C": "CN", 16 | "ST": "BeiJing", 17 | "L": "BeiJing", 18 | "O": "system:kube-controller-manager", 19 | "OU": "4Paradigm" 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /config/Kmaster/Kscheduler/kube-scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart=/opt/k8s/bin/kube-scheduler \ 7 | --address=127.0.0.1 \ 8 | 
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ 9 | --leader-elect=true \ 10 | --alsologtostderr=true \ 11 | --logtostderr=false \ 12 | --log-dir=/var/log/kubernetes \ 13 | --v=2 14 | Restart=on-failure 15 | RestartSec=5 16 | User=root 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /config/Kwork/Kproxy/kube-proxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kube-Proxy Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=network.target 5 | 6 | [Service] 7 | WorkingDirectory=/var/lib/kube-proxy 8 | ExecStart=/opt/k8s/bin/kube-proxy \ 9 | --config=/etc/kubernetes/kube-proxy.config.yaml \ 10 | --alsologtostderr=true \ 11 | --logtostderr=false \ 12 | --log-dir=/var/log/kubernetes \ 13 | --v=2 14 | Restart=on-failure 15 | RestartSec=5 16 | LimitNOFILE=65536 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /pack/downall.sh: -------------------------------------------------------------------------------- 1 | wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 2 | wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 3 | wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 4 | wget https://dl.k8s.io/v1.10.4/kubernetes-client-linux-amd64.tar.gz 5 | wget https://github.com/coreos/etcd/releases/download/v3.3.7/etcd-v3.3.7-linux-amd64.tar.gz 6 | wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz 7 | wget https://dl.k8s.io/v1.10.4/kubernetes-server-linux-amd64.tar.gz 8 | wget https://download.docker.com/linux/static/stable/x86_64/docker-18.03.1-ce.tgz 9 | -------------------------------------------------------------------------------- /config/Kmaster/Kapi/kubernetes-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "192.168.111.3", 6 | "192.168.111.4", 7 | "192.168.111.5", 8 | "192.168.111.100", 9 | "10.254.0.1", 10 | "kubernetes", 11 | "kubernetes.default", 12 | "kubernetes.default.svc", 13 | "kubernetes.default.svc.cluster", 14 | "kubernetes.default.svc.cluster.local" 15 | ], 16 | "key": { 17 | "algo": "rsa", 18 | "size": 2048 19 | }, 20 | "names": [ 21 | { 22 | "C": "CN", 23 | "ST": "BeiJing", 24 | "L": "BeiJing", 25 | "O": "k8s", 26 | "OU": "4Paradigm" 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /config/Kwork/Kkubelet/kubelet.service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=docker.service 5 | Requires=docker.service 6 | 7 | [Service] 8 | WorkingDirectory=/var/lib/kubelet 9 | ExecStart=/opt/k8s/bin/kubelet \ 10 | --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \ 11 | --cert-dir=/etc/kubernetes/cert \ 12 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 13 | --config=/etc/kubernetes/kubelet.config.json \ 14 | --hostname-override=##NODE_NAME## \ 15 | --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest \ 16 | --allow-privileged=true \ 17 | --alsologtostderr=true \ 18 | --logtostderr=false \ 19 | --log-dir=/var/log/kubernetes \ 20 | --v=2 21 | 
Restart=on-failure 22 | RestartSec=5 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | -------------------------------------------------------------------------------- /config/Kwork/Kkubelet/kubelet.config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "KubeletConfiguration", 3 | "apiVersion": "kubelet.config.k8s.io/v1beta1", 4 | "authentication": { 5 | "x509": { 6 | "clientCAFile": "/etc/kubernetes/cert/ca.pem" 7 | }, 8 | "webhook": { 9 | "enabled": true, 10 | "cacheTTL": "2m0s" 11 | }, 12 | "anonymous": { 13 | "enabled": false 14 | } 15 | }, 16 | "authorization": { 17 | "mode": "Webhook", 18 | "webhook": { 19 | "cacheAuthorizedTTL": "5m0s", 20 | "cacheUnauthorizedTTL": "30s" 21 | } 22 | }, 23 | "address": "##NODE_IP##", 24 | "port": 10250, 25 | "readOnlyPort": 0, 26 | "cgroupDriver": "cgroupfs", 27 | "hairpinMode": "promiscuous-bridge", 28 | "serializeImagePulls": false, 29 | "featureGates": { 30 | "RotateKubeletClientCertificate": true, 31 | "RotateKubeletServerCertificate": true 32 | }, 33 | "clusterDomain": "cluster.local.", 34 | "clusterDNS": ["10.254.0.2"] 35 | } 36 | -------------------------------------------------------------------------------- /config/Kmaster/Kha/haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local0 3 | log /dev/log local1 notice 4 | chroot /var/lib/haproxy 5 | stats socket /var/run/haproxy-admin.sock mode 660 level admin 6 | stats timeout 30s 7 | user haproxy 8 | group haproxy 9 | daemon 10 | nbproc 1 11 | defaults 12 | log global 13 | timeout connect 5000 14 | timeout client 10m 15 | timeout server 10m 16 | listen admin_stats 17 | bind 0.0.0.0:10080 18 | mode http 19 | log 127.0.0.1 local0 err 20 | stats refresh 30s 21 | stats uri /status 22 | stats realm welcome login\ Haproxy 23 | stats auth admin:123456 24 | stats hide-version 25 | stats admin if TRUE 26 | listen kube-master 27 | bind 0.0.0.0:8443 28 | mode tcp 29 | option tcplog 30 | balance source 31 | server 192.168.111.3 192.168.111.3:6443 check inter 2000 fall 2 rise 2 weight 1 32 | server 192.168.111.4 192.168.111.4:6443 check inter 2000 fall 2 rise 2 weight 1 33 | server 192.168.111.5 192.168.111.5:6443 check inter 2000 fall 2 rise 2 weight 1 34 | -------------------------------------------------------------------------------- /config/environment.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | # 生成 EncryptionConfig 所需的加密 key 4 | export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64) 5 | 6 | # 最好使用当前未用的网段来定义服务网段和Pod网段 7 | # 服务网段,部署前路由不可达,部署后集群内路由可达(kube-proxy 和 ipvs 保证) 8 | export SERVICE_CIDR="10.254.0.0/16" 9 | 10 | # Pod 网段,建议 /16 段地址,部署前路由不可达,部署后集群内路由可达(flanneld 保证) 11 | export CLUSTER_CIDR="172.30.0.0/16" 12 | 13 | # 服务端口范围 (NodePort Range) 14 | export NODE_PORT_RANGE="8400-9000" 15 | 16 | # 集群各机器 IP 数组 17 | export NODE_IPS=(192.168.111.3 192.168.111.4 192.168.111.5) #把IP替换成自己所使用的主机IP即可,可加多个 18 | 19 | # 集群各 IP 对应的 主机名数组 20 | export NODE_NAMES=(kube-node1 kube-node2 kube-node3) #如果主机有多个,可酌情添加 21 | 22 | # kube-apiserver 的 VIP(HA 组件 keepalived 发布的 IP) 23 | export MASTER_VIP=192.168.111.100 #集群的虚拟IP 24 | 25 | # kube-apiserver VIP 地址(HA 组件 haproxy 监听 8443 端口) 26 | export KUBE_APISERVER="https://${MASTER_VIP}:8443" 27 | 28 | # HA 节点,配置 VIP 的网络接口名称 29 | export VIP_IF="eth0" 30 | 31 | # etcd 集群服务地址列表 32 | export 
ETCD_ENDPOINTS="https://192.168.111.3:2379,https://192.168.111.4:2379,https://192.168.111.5:2379" #把IP替换成自己所使用的主机IP即可,可加多个 33 | 34 | # etcd 集群间通信的 IP 和端口 35 | export ETCD_NODES="kube-node1=https://192.168.111.3:2380,kube-node2=https://192.168.111.4:2380,kube-node3=https://192.168.111.5:2380" #把IP替换成自己所使用的主机IP即可,可加多个 36 | 37 | # flanneld 网络配置前缀 38 | export FLANNEL_ETCD_PREFIX="/kubernetes/network" 39 | 40 | # kubernetes 服务 IP (一般是 SERVICE_CIDR 中第一个IP) 41 | export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1" 42 | 43 | # 集群 DNS 服务 IP (从 SERVICE_CIDR 中预分配) 44 | export CLUSTER_DNS_SVC_IP="10.254.0.2" 45 | 46 | # 集群 DNS 域名 47 | export CLUSTER_DNS_DOMAIN="cluster.local." 48 | 49 | # 将二进制目录 /opt/k8s/bin 加到 PATH 中 50 | export PATH=/opt/k8s/bin:$PATH 51 | -------------------------------------------------------------------------------- /config/Kwork/Kkubelet/csr-crb.yaml: -------------------------------------------------------------------------------- 1 | # Approve all CSRs for the group "system:bootstrappers" 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: auto-approve-csrs-for-group 6 | subjects: 7 | - kind: Group 8 | name: system:bootstrappers 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: ClusterRole 12 | name: system:certificates.k8s.io:certificatesigningrequests:nodeclient 13 | apiGroup: rbac.authorization.k8s.io 14 | --- 15 | # To let a node of the group "system:nodes" renew its own credentials 16 | kind: ClusterRoleBinding 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | metadata: 19 | name: node-client-cert-renewal 20 | subjects: 21 | - kind: Group 22 | name: system:nodes 23 | apiGroup: rbac.authorization.k8s.io 24 | roleRef: 25 | kind: ClusterRole 26 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 27 | apiGroup: rbac.authorization.k8s.io 28 | --- 29 | # A ClusterRole which instructs the CSR approver to approve a node requesting a 30 | # serving cert matching its client cert. 
31 | kind: ClusterRole
32 | apiVersion: rbac.authorization.k8s.io/v1
33 | metadata:
34 |   name: approve-node-server-renewal-csr
35 | rules:
36 | - apiGroups: ["certificates.k8s.io"]
37 |   resources: ["certificatesigningrequests/selfnodeserver"]
38 |   verbs: ["create"]
39 | ---
40 | # To let a node of the group "system:nodes" renew its own server credentials
41 | kind: ClusterRoleBinding
42 | apiVersion: rbac.authorization.k8s.io/v1
43 | metadata:
44 |   name: node-server-cert-renewal
45 | subjects:
46 | - kind: Group
47 |   name: system:nodes
48 |   apiGroup: rbac.authorization.k8s.io
49 | roleRef:
50 |   kind: ClusterRole
51 |   name: approve-node-server-renewal-csr
52 |   apiGroup: rbac.authorization.k8s.io
53 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | ## 1. A brief note.
3 | 
4 | This script only exists today thanks to the excellent https://github.com/opsnull/follow-me-install-kubernetes-cluster project that its author shared. I had thought about writing a deployment script once I became familiar with Kubernetes, but back then I had no particularly good approach. When I saw the deployment workflow of the open-source project above last week, everything suddenly became clear to me, and while working through that project I already decided to write this small deployment script.
5 | 
6 | So this script is basically a stacking-up of that project's workflow; all I did myself was a little bit of tidying and debugging. **Once again, my sincere thanks for it!**
7 | 
8 | Of course, when I actually sat down to put the script together, it turned out not to be quite that simple; the effort that goes into the script is what makes every later deployment simpler.
9 | 
10 | Here is a quick summary of the servers I used:
11 | 
12 | All servers run CentOS 7.3; other OS versions have not been tested.
13 | 
14 | | Host | Hostname | Components |
15 | | :------------: | :------------: | :------------: |
16 | | 192.168.111.3 | kube-node1 | Kubernetes 1.10.4, Docker 18.03.1-ce, Etcd 3.3.7, Flanneld 0.10.0, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy |
17 | | 192.168.111.4 | kube-node2 | same as above |
18 | | 192.168.111.5 | kube-node3 | same as above |
19 | 
20 | ## 2. Preparation.
21 | 
22 | First upload the whole deployment bundle to the deployment server and unpack it, then complete the following preparation steps.
23 | 
24 | I have packaged the full set of installation files and uploaded it to Baidu Cloud; you can download it from there.
25 | 
26 | Link: https://pan.baidu.com/s/1JbICafwEdIwHnsDlGvPIMw  Extraction code: 4iaq
27 | 
28 | ### 1. Edit the following files.
29 | 
30 | ```
31 | config/environment.sh #change the IPs to those of the machines you will deploy on
32 | 
33 | config/Kcsh/hosts #change the IPs to those of the machines you will deploy on
34 | 
35 | config/Ketcd/etcd-csr.json #change the IPs to those of the machines you will deploy on
36 | 
37 | config/Kmaster/Kha/haproxy.cfg #change the IPs to those of the machines you will deploy on
38 | 
39 | config/Kmaster/Kapi/kubernetes-csr.json #change the IPs to those of the machines you will deploy on
40 | 
41 | config/Kmaster/Kmanage/kube-controller-manager-csr.json #change the IPs to those of the machines you will deploy on
42 | 
43 | config/Kmaster/Kscheduler/kube-scheduler-csr.json #change the IPs to those of the machines you will deploy on
44 | ```
45 | 
46 | 
47 | ### 2. Basic configuration.
48 | 
49 | These steps are all executed on the kube-node1 host.
50 | 
51 | `Note:` follow these steps strictly as written, otherwise the deployment script below may fail to run all the way through.
52 | 
53 | ```
54 | ssh-keygen
55 | ssh-copy-id 192.168.111.3
56 | ssh-copy-id 192.168.111.4
57 | ssh-copy-id 192.168.111.5
58 | 
59 | scp config/Kcsh/hosts root@192.168.111.3:/etc/hosts
60 | scp config/Kcsh/hosts root@192.168.111.4:/etc/hosts
61 | scp config/Kcsh/hosts root@192.168.111.5:/etc/hosts
62 | 
63 | 
64 | ssh root@kube-node1 "hostnamectl set-hostname kube-node1"
65 | ssh root@kube-node2 "hostnamectl set-hostname kube-node2"
66 | ssh root@kube-node3 "hostnamectl set-hostname kube-node3"
67 | ```
68 | 
69 | ## 3. Deployment.
70 | 
71 | Deployment itself is very simple: just run the `magic.sh` script (a short usage sketch follows this section).
72 | 
73 | That said, a few points deserve a brief note:
74 | 
75 | - 1. Before starting the actual deployment, be sure to check carefully that every piece of configuration matches what your environment needs; adjust anything that does not match.
76 | - 2. If the deployment gets stuck somewhere, or exits without completing, troubleshoot the corresponding deployment stage and then re-run the script; it will carry on with the remaining deployment.
77 | - 3. If you have any suggestions about the script's shortcomings, you are welcome to raise them with me, so we can maintain and improve it together!
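As a quick orientation for the steps above, here is a minimal usage sketch, run from kube-node1. The archive and directory names are only placeholders for wherever you unpacked the bundle:

```
# unpack the deployment bundle and enter it (names here are hypothetical)
tar xf k8s-deploy.tar.gz && cd k8s-deploy

# adjust the IPs, VIP and interface name in the files listed under Preparation step 1
vi config/environment.sh config/Kcsh/hosts config/Ketcd/etcd-csr.json \
   config/Kmaster/Kha/haproxy.cfg config/Kmaster/Kapi/kubernetes-csr.json \
   config/Kmaster/Kmanage/kube-controller-manager-csr.json \
   config/Kmaster/Kscheduler/kube-scheduler-csr.json

# distribute SSH keys, /etc/hosts and hostnames as shown in Preparation step 2, then:
bash magic.sh
```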
78 | 79 | ## 4,简单验证。 80 | 81 | 部署完成之后,可使用如下方式进行一些对集群可用性的初步检验: 82 | 83 | ### 1,检查服务是否均已正常启动。 84 | 85 | ``` 86 | #!/bin/bash 87 | # 88 | #author:eryajf 89 | #blog:www.eryajf.net 90 | #time:2018-11 91 | # 92 | set -e 93 | source /opt/k8s/bin/environment.sh 94 | 95 | # 96 | ##set color## 97 | echoRed() { echo $'\e[0;31m'"$1"$'\e[0m'; } 98 | echoGreen() { echo $'\e[0;32m'"$1"$'\e[0m'; } 99 | echoYellow() { echo $'\e[0;33m'"$1"$'\e[0m'; } 100 | ##set color## 101 | # 102 | 103 | for node_ip in ${NODE_IPS[@]} 104 | do 105 | echoGreen ">>> ${node_ip}" 106 | ssh root@${node_ip} "systemctl status etcd|grep Active" 107 | ssh root@${node_ip} "systemctl status flanneld|grep Active" 108 | ssh root@${node_ip} "systemctl status haproxy|grep Active" 109 | ssh root@${node_ip} "systemctl status keepalived|grep Active" 110 | ssh root@${node_ip} "systemctl status kube-apiserver |grep 'Active:'" 111 | ssh root@${node_ip} "systemctl status kube-controller-manager|grep Active" 112 | ssh root@${node_ip} "systemctl status kube-scheduler|grep Active" 113 | ssh root@${node_ip} "systemctl status docker|grep Active" 114 | ssh root@${node_ip} "systemctl status kubelet | grep Active" 115 | ssh root@${node_ip} "systemctl status kube-proxy|grep Active" 116 | done 117 | ``` 118 | 119 | ### 2,查看相关服务可用性。 120 | 121 | #### 1,验证etcd集群可用性。 122 | 123 | ``` 124 | cat > magic.sh << "EOF" 125 | #!/bin/bash 126 | 127 | source /opt/k8s/bin/environment.sh 128 | 129 | for node_ip in ${NODE_IPS[@]} 130 | do 131 | echo ">>> ${node_ip}" 132 | ETCDCTL_API=3 /opt/k8s/bin/etcdctl \ 133 | --endpoints=https://${node_ip}:2379 \ 134 | --cacert=/etc/kubernetes/cert/ca.pem \ 135 | --cert=/etc/etcd/cert/etcd.pem \ 136 | --key=/etc/etcd/cert/etcd-key.pem endpoint health 137 | done 138 | EOF 139 | ``` 140 | 141 | #### 2,验证flannel网络。 142 | 143 | 查看已分配的 Pod 子网段列表: 144 | 145 | ``` 146 | source /opt/k8s/bin/environment.sh 147 | 148 | etcdctl \ 149 | --endpoints=${ETCD_ENDPOINTS} \ 150 | --ca-file=/etc/kubernetes/cert/ca.pem \ 151 | --cert-file=/etc/flanneld/cert/flanneld.pem \ 152 | --key-file=/etc/flanneld/cert/flanneld-key.pem \ 153 | ls ${FLANNEL_ETCD_PREFIX}/subnets 154 | ``` 155 | 156 | 输出: 157 | 158 | ``` 159 | /kubernetes/network/subnets/172.30.84.0-24 160 | /kubernetes/network/subnets/172.30.8.0-24 161 | /kubernetes/network/subnets/172.30.29.0-24 162 | ``` 163 | 164 | 验证各节点能通过 Pod 网段互通: 165 | 166 | `注意其中的IP段换成自己的。` 167 | 168 | ``` 169 | cat > magic.sh << "EOF" 170 | #!/bin/bash 171 | 172 | source /opt/k8s/bin/environment.sh 173 | 174 | for node_ip in ${NODE_IPS[@]} 175 | do 176 | echo ">>> ${node_ip}" 177 | ssh ${node_ip} "ping -c 1 172.30.8.0" 178 | ssh ${node_ip} "ping -c 1 172.30.29.0" 179 | ssh ${node_ip} "ping -c 1 172.30.84.0" 180 | done 181 | EOF 182 | ``` 183 | 184 | #### 3,高可用组件验证。 185 | 186 | 查看 VIP 所在的节点,确保可以 ping 通 VIP: 187 | 188 | ``` 189 | cat > magic.sh << "EOF" 190 | #!/bin/bash 191 | 192 | source /opt/k8s/bin/environment.sh 193 | 194 | for node_ip in ${NODE_IPS[@]} 195 | do 196 | echo ">>> ${node_ip}" 197 | ssh ${node_ip} "/usr/sbin/ip addr show ${VIP_IF}" 198 | ssh ${node_ip} "ping -c 1 ${MASTER_VIP}" 199 | done 200 | EOF 201 | ``` 202 | 203 | #### 4,高可用性试验。 204 | 205 | 查看当前的 leader: 206 | 207 | ``` 208 | $kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml 209 | apiVersion: v1 210 | kind: Endpoints 211 | metadata: 212 | annotations: 213 | control-plane.alpha.kubernetes.io/leader: 
'{"holderIdentity":"kube-node1_444fbc06-f3d8-11e8-8ca8-0050568f514f","leaseDurationSeconds":15,"acquireTime":"2018-11-29T13:11:21Z","renewTime":"2018-11-29T13:48:10Z","leaderTransitions":0}' 214 | creationTimestamp: 2018-11-29T13:11:21Z 215 | name: kube-controller-manager 216 | namespace: kube-system 217 | resourceVersion: "3134" 218 | selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager 219 | uid: 4452bff1-f3d8-11e8-a5a6-0050568fef9b 220 | ``` 221 | 222 | 可见,当前的 leader 为 kube-node1 节点。 223 | 224 | 现在停掉kube-node1上的kube-controller-manager。 225 | 226 | ``` 227 | $systemctl stop kube-controller-manager 228 | $systemctl status kube-controller-manager |grep Active 229 | Active: inactive (dead) since Sat 2018-11-24 00:52:53 CST; 44s ago 230 | ``` 231 | 232 | 大概一分钟后,再查看一下当前的leader: 233 | 234 | ``` 235 | $kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml 236 | apiVersion: v1 237 | kind: Endpoints 238 | metadata: 239 | annotations: 240 | control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"kube-node3_45525ae6-f3d8-11e8-a2b8-0050568fbcaa","leaseDurationSeconds":15,"acquireTime":"2018-11-29T13:49:28Z","renewTime":"2018-11-29T13:49:28Z","leaderTransitions":1}' 241 | creationTimestamp: 2018-11-29T13:11:21Z 242 | name: kube-controller-manager 243 | namespace: kube-system 244 | resourceVersion: "3227" 245 | selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager 246 | uid: 4452bff1-f3d8-11e8-a5a6-0050568fef9b 247 | ``` 248 | 249 | 可以看到已经自动漂移到kube-node3上去了。 250 | 251 | #### 5,查验kube-proxy功能。 252 | 253 | 查看 ipvs 路由规则 254 | 255 | ``` 256 | cat > magic.sh << "EOF" 257 | #!/bin/bash 258 | 259 | source /opt/k8s/bin/environment.sh 260 | 261 | for node_ip in ${NODE_IPS[@]} 262 | do 263 | echo ">>> ${node_ip}" 264 | ssh root@${node_ip} "/usr/sbin/ipvsadm -ln" 265 | done 266 | EOF 267 | ``` 268 | 269 | 输出: 270 | 271 | ``` 272 | $bash magic.sh 273 | >>> 192.168.111.120 274 | IP Virtual Server version 1.2.1 (size=4096) 275 | Prot LocalAddress:Port Scheduler Flags 276 | -> RemoteAddress:Port Forward Weight ActiveConn InActConn 277 | TCP 10.254.0.1:443 rr persistent 10800 278 | -> 192.168.111.120:6443 Masq 1 0 0 279 | -> 192.168.111.121:6443 Masq 1 0 0 280 | -> 192.168.111.122:6443 Masq 1 0 0 281 | >>> 192.168.111.121 282 | IP Virtual Server version 1.2.1 (size=4096) 283 | Prot LocalAddress:Port Scheduler Flags 284 | -> RemoteAddress:Port Forward Weight ActiveConn InActConn 285 | TCP 10.254.0.1:443 rr persistent 10800 286 | -> 192.168.111.120:6443 Masq 1 0 0 287 | -> 192.168.111.121:6443 Masq 1 0 0 288 | -> 192.168.111.122:6443 Masq 1 0 0 289 | >>> 192.168.111.122 290 | IP Virtual Server version 1.2.1 (size=4096) 291 | Prot LocalAddress:Port Scheduler Flags 292 | -> RemoteAddress:Port Forward Weight ActiveConn InActConn 293 | TCP 10.254.0.1:443 rr persistent 10800 294 | -> 192.168.111.120:6443 Masq 1 0 0 295 | -> 192.168.111.121:6443 Masq 1 0 0 296 | -> 192.168.111.122:6443 Masq 1 0 0 297 | ``` 298 | 299 | #### 6,创建一个应用。 300 | 301 | 查看集群节点: 302 | 303 | ``` 304 | $kubectl get node 305 | NAME STATUS ROLES AGE VERSION 306 | kube-node1 Ready 45m v1.10.4 307 | kube-node2 Ready 45m v1.10.4 308 | kube-node3 Ready 45m v1.10.4 309 | ``` 310 | 311 | 创建测试应用: 312 | 313 | ``` 314 | cat > nginx-ds.yml < magic.sh << "EOF" 371 | #!/bin/bash 372 | 373 | source /opt/k8s/bin/environment.sh 374 | 375 | for node_ip in ${NODE_IPS[@]} 376 | do 377 | echo ">>> ${node_ip}" 378 | ssh ${node_ip} "ping -c 1 172.30.87.2" 379 | ssh ${node_ip} "ping -c 1 172.30.99.2" 380 
| ssh ${node_ip} "ping -c 1 172.30.55.2" 381 | done 382 | EOF 383 | ``` 384 | 385 | 检查服务 IP 和端口可达性 386 | 387 | ``` 388 | $kubectl get svc |grep nginx-ds 389 | nginx-ds NodePort 10.254.110.153 80:8781/TCP 6h 390 | ``` 391 | 392 | 在所有 Node 上 curl Service IP: 393 | 394 | ``` 395 | cat > magic.sh << "EOF" 396 | #!/bin/bash 397 | 398 | source /opt/k8s/bin/environment.sh 399 | 400 | for node_ip in ${NODE_IPS[@]} 401 | do 402 | echo ">>> ${node_ip}" 403 | ssh ${node_ip} "curl 10.254.128.98" 404 | done 405 | EOF 406 | ``` 407 | 408 | 检查服务的 NodePort 可达性 409 | 410 | ``` 411 | cat > magic.sh << "EOF" 412 | #!/bin/bash 413 | 414 | source /opt/k8s/bin/environment.sh 415 | 416 | for node_ip in ${NODE_IPS[@]} 417 | do 418 | echo ">>> ${node_ip}" 419 | ssh ${node_ip} "curl ${node_ip}:8996" 420 | done 421 | EOF 422 | ``` 423 | -------------------------------------------------------------------------------- /magic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #author:eryajf 4 | #blog:www.eryajf.net 5 | #time:2018-11 6 | #version:v1 7 | # 8 | 9 | base_dir=$(pwd) 10 | set -e 11 | mkdir -p /opt/k8s/bin/ && cp $base_dir/config/environment.sh /opt/k8s/bin/ 12 | source /opt/k8s/bin/environment.sh 13 | 14 | # 15 | ##set color## 16 | echoRed() { echo $'\e[0;31m'"$1"$'\e[0m'; } 17 | echoGreen() { echo $'\e[0;32m'"$1"$'\e[0m'; } 18 | echoYellow() { echo $'\e[0;33m'"$1"$'\e[0m'; } 19 | ##set color## 20 | # 21 | 22 | Kcsh(){ 23 | 24 | source /opt/k8s/bin/environment.sh 25 | for node_ip in ${NODE_IPS[@]} 26 | do 27 | echoGreen ">>> ${node_ip}" 28 | ssh root@${node_ip} "yum install -y epel-release conntrack ipvsadm ipset sysstat curl iptables libseccomp keepalived haproxy" 29 | # ssh root@${node_ip} "systemctl stop firewalld && systemctl disable firewalld" 30 | ssh root@${node_ip} "iptables -F && sudo iptables -X && sudo iptables -F -t nat && sudo iptables -X -t nat && iptables -P FORWARD ACCEPT" 31 | ssh root@${node_ip} "swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab" 32 | scp $base_dir/config/Kcsh/hosts root@${node_ip}:/etc/hosts 33 | scp $base_dir/config/Kcsh/kubernetes.conf root@${node_ip}:/etc/sysctl.d/kubernetes.conf 34 | ssh root@${node_ip} "modprobe br_netfilter && modprobe ip_vs" 35 | ssh root@${node_ip} "sysctl -p /etc/sysctl.d/kubernetes.conf" 36 | ssh root@${node_ip} 'yum -y install wget ntpdate lrzsz curl rsync && ntpdate -u cn.pool.ntp.org && echo "* * * * * /usr/sbin/ntpdate -u cn.pool.ntp.org &> /dev/null" > /var/spool/cron/root' 37 | ssh root@${node_ip} 'mkdir -p /opt/k8s/bin && mkdir -p /etc/kubernetes/cert' 38 | ssh root@${node_ip} 'mkdir -p /etc/etcd/cert && mkdir -p /var/lib/etcd' 39 | scp $base_dir/config/environment.sh root@${node_ip}:/opt/k8s/bin/ 40 | ssh root@${node_ip} "chmod +x /opt/k8s/bin/*" 41 | done 42 | } 43 | 44 | Kzs(){ 45 | 46 | cp $base_dir/pack/cfssljson_linux-amd64 /opt/k8s/bin/cfssljson 47 | cp $base_dir/pack/cfssl_linux-amd64 /opt/k8s/bin/cfssl 48 | cp $base_dir/pack/cfssl-certinfo_linux-amd64 /opt/k8s/bin/cfssl-certinfo 49 | chmod +x /opt/k8s/bin/* 50 | export PATH=/opt/k8s/bin:$PATH 51 | 52 | cd $base_dir/config/Kzs/ && cfssl gencert -initca ca-csr.json | cfssljson -bare ca 53 | 54 | for node_ip in ${NODE_IPS[@]} 55 | do 56 | echoGreen ">>> ${node_ip}" 57 | scp $base_dir/config/Kzs/{ca*.pem,ca-config.json} root@${node_ip}:/etc/kubernetes/cert 58 | done 59 | } 60 | 61 | Kctl(){ 62 | 63 | tar xf $base_dir/pack/kubernetes-client-linux-amd64.tar.gz -C $base_dir/config/Kctl/client 64 | tar xf 
$base_dir/pack/kubernetes-server-linux-amd64.tar.gz -C $base_dir/config/Kctl/server 65 | for node_ip in ${NODE_IPS[@]} 66 | do 67 | echoGreen ">>> ${node_ip}" 68 | scp $base_dir/config/Kctl/client/kubernetes/client/bin/kubectl root@${node_ip}:/opt/k8s/bin/ 69 | scp $base_dir/config/Kctl/server/kubernetes/server/bin/* root@${node_ip}:/opt/k8s/bin/ 70 | ssh root@${node_ip} "chmod +x /opt/k8s/bin/*" 71 | done 72 | 73 | source /opt/k8s/bin/environment.sh 74 | cd $base_dir/config/Kctl/ 75 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 76 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 77 | -config=/etc/kubernetes/cert/ca-config.json \ 78 | -profile=kubernetes admin-csr.json | cfssljson -bare admin 79 | 80 | # 设置集群参数 81 | kubectl config set-cluster kubernetes \ 82 | --certificate-authority=/etc/kubernetes/cert/ca.pem \ 83 | --embed-certs=true \ 84 | --server=${KUBE_APISERVER} \ 85 | --kubeconfig=kubectl.kubeconfig 86 | 87 | # 设置客户端认证参数 88 | kubectl config set-credentials admin \ 89 | --client-certificate=admin.pem \ 90 | --client-key=admin-key.pem \ 91 | --embed-certs=true \ 92 | --kubeconfig=kubectl.kubeconfig 93 | 94 | # 设置上下文参数 95 | kubectl config set-context kubernetes \ 96 | --cluster=kubernetes \ 97 | --user=admin \ 98 | --kubeconfig=kubectl.kubeconfig 99 | 100 | # 设置默认上下文 101 | kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig 102 | 103 | source /opt/k8s/bin/environment.sh 104 | for node_ip in ${NODE_IPS[@]} 105 | do 106 | echoGreen ">>> ${node_ip}" 107 | ssh root@${node_ip} "mkdir -p ~/.kube" 108 | scp $base_dir/config/Kctl/kubectl.kubeconfig root@${node_ip}:~/.kube/config 109 | done 110 | } 111 | 112 | Ketcd(){ 113 | 114 | tar xf $base_dir/pack/etcd-v3.3.7-linux-amd64.tar.gz -C $base_dir/config/Ketcd 115 | cd $base_dir/config/Ketcd 116 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 117 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 118 | -config=/etc/kubernetes/cert/ca-config.json \ 119 | -profile=kubernetes etcd-csr.json | cfssljson -bare etcd 120 | 121 | source /opt/k8s/bin/environment.sh 122 | for node_ip in ${NODE_IPS[@]} 123 | do 124 | echoGreen ">>> ${node_ip}" 125 | scp $base_dir/config/Ketcd/etcd-v3.3.7-linux-amd64/etcd* root@${node_ip}:/opt/k8s/bin 126 | ssh root@${node_ip} "chmod +x /opt/k8s/bin/*" 127 | ssh root@${node_ip} "mkdir -p /etc/etcd/cert" 128 | scp $base_dir/config/Ketcd/etcd*.pem root@${node_ip}:/etc/etcd/cert/ 129 | done 130 | 131 | cat > etcd.service.template < etcd-${NODE_IPS[i]}.service 174 | done 175 | 176 | for node_ip in ${NODE_IPS[@]} 177 | do 178 | echoGreen ">>> ${node_ip}" 179 | cd $base_dir/config/Ketcd/ 180 | ssh root@${node_ip} "mkdir -p /var/lib/etcd" 181 | scp $base_dir/config/Ketcd/etcd-${node_ip}.service root@${node_ip}:/etc/systemd/system/etcd.service 182 | done 183 | 184 | source /opt/k8s/bin/environment.sh 185 | for node_ip in ${NODE_IPS[@]} 186 | do 187 | echo ">>> ${node_ip}" 188 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable etcd && systemctl start etcd" & 189 | sleep 3 190 | ssh root@${node_ip} "systemctl status etcd|grep Active" 191 | done 192 | 193 | echoYellow "检测etcd服务是否正常" 194 | ETCDCTL_API=3 /opt/k8s/bin/etcdctl \ 195 | --endpoints=https://${node_ip}:2379 \ 196 | --cacert=/etc/kubernetes/cert/ca.pem \ 197 | --cert=/etc/etcd/cert/etcd.pem \ 198 | --key=/etc/etcd/cert/etcd-key.pem endpoint health 199 | } 200 | 201 | Knet (){ 202 | 203 | tar xf $base_dir/pack/flannel-v0.10.0-linux-amd64.tar.gz -C $base_dir/config/Knet/flannel 204 | source /opt/k8s/bin/environment.sh 205 | for node_ip in 
${NODE_IPS[@]} 206 | do 207 | echoGreen ">>> ${node_ip}" 208 | scp $base_dir/config/Knet/flannel/{flanneld,mk-docker-opts.sh} root@${node_ip}:/opt/k8s/bin/ 209 | ssh root@${node_ip} "chmod +x /opt/k8s/bin/*" 210 | done 211 | 212 | cd $base_dir/config/Knet 213 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 214 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 215 | -config=/etc/kubernetes/cert/ca-config.json \ 216 | -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld 217 | 218 | source /opt/k8s/bin/environment.sh 219 | for node_ip in ${NODE_IPS[@]} 220 | do 221 | echoGreen ">>> ${node_ip}" 222 | ssh root@${node_ip} "mkdir -p /etc/flanneld/cert" 223 | scp $base_dir/config/Knet/flanneld*.pem root@${node_ip}:/etc/flanneld/cert 224 | done 225 | 226 | etcdctl \ 227 | --endpoints=${ETCD_ENDPOINTS} \ 228 | --ca-file=/etc/kubernetes/cert/ca.pem \ 229 | --cert-file=/etc/flanneld/cert/flanneld.pem \ 230 | --key-file=/etc/flanneld/cert/flanneld-key.pem \ 231 | set ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}' 232 | 233 | source /opt/k8s/bin/environment.sh 234 | cat > flanneld.service << EOF 235 | [Unit] 236 | Description=Flanneld overlay address etcd agent 237 | After=network.target 238 | After=network-online.target 239 | Wants=network-online.target 240 | After=etcd.service 241 | Before=docker.service 242 | 243 | [Service] 244 | Type=notify 245 | ExecStart=/opt/k8s/bin/flanneld \\ 246 | -etcd-cafile=/etc/kubernetes/cert/ca.pem \\ 247 | -etcd-certfile=/etc/flanneld/cert/flanneld.pem \\ 248 | -etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \\ 249 | -etcd-endpoints=${ETCD_ENDPOINTS} \\ 250 | -etcd-prefix=${FLANNEL_ETCD_PREFIX} \\ 251 | -iface=${VIP_IF} 252 | ExecStartPost=/opt/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker 253 | Restart=on-failure 254 | 255 | [Install] 256 | WantedBy=multi-user.target 257 | RequiredBy=docker.service 258 | EOF 259 | 260 | source /opt/k8s/bin/environment.sh 261 | for node_ip in ${NODE_IPS[@]} 262 | do 263 | echoGreen ">>> ${node_ip}" 264 | scp $base_dir/config/Knet/flanneld.service root@${node_ip}:/etc/systemd/system/ 265 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl start flanneld" 266 | ssh root@${node_ip} "systemctl status flanneld|grep Active" 267 | done 268 | } 269 | 270 | Kmaster (){ 271 | 272 | Kha(){ 273 | 274 | source /opt/k8s/bin/environment.sh 275 | cat > $base_dir/config/Kmaster/Kha/keepalived-master.conf < $base_dir/config/Kmaster/Kha/keepalived-backup.conf <>> ${node_ip}" 337 | scp $base_dir/config/Kmaster/Kha/haproxy.cfg root@${node_ip}:/etc/haproxy 338 | ssh root@${node_ip} "systemctl start haproxy" 339 | ssh root@${node_ip} "systemctl status haproxy|grep Active" 340 | ssh root@${node_ip} "netstat -lnpt|grep haproxy" 341 | ssh root@${node_ip} "systemctl start keepalived" 342 | ssh root@${node_ip} "systemctl status keepalived|grep Active" 343 | done 344 | } 345 | 346 | Kapi(){ 347 | 348 | source /opt/k8s/bin/environment.sh 349 | cd $base_dir/config/Kmaster/Kapi/ 350 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 351 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 352 | -config=/etc/kubernetes/cert/ca-config.json \ 353 | -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes 354 | 355 | cat > $base_dir/config/Kmaster/Kapi/kube-apiserver.service.template << EOF 356 | [Unit] 357 | Description=Kubernetes API Server 358 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 359 | 
After=network.target 360 | 361 | [Service] 362 | ExecStart=/opt/k8s/bin/kube-apiserver \\ 363 | --enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\ 364 | --anonymous-auth=false \\ 365 | --experimental-encryption-provider-config=/etc/kubernetes/encryption-config.yaml \\ 366 | --advertise-address=##NODE_IP## \\ 367 | --bind-address=##NODE_IP## \\ 368 | --insecure-port=0 \\ 369 | --authorization-mode=Node,RBAC \\ 370 | --runtime-config=api/all \\ 371 | --enable-bootstrap-token-auth \\ 372 | --service-cluster-ip-range=${SERVICE_CIDR} \\ 373 | --service-node-port-range=${NODE_PORT_RANGE} \\ 374 | --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \\ 375 | --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \\ 376 | --client-ca-file=/etc/kubernetes/cert/ca.pem \\ 377 | --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \\ 378 | --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \\ 379 | --service-account-key-file=/etc/kubernetes/cert/ca-key.pem \\ 380 | --etcd-cafile=/etc/kubernetes/cert/ca.pem \\ 381 | --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \\ 382 | --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \\ 383 | --etcd-servers=${ETCD_ENDPOINTS} \\ 384 | --enable-swagger-ui=true \\ 385 | --allow-privileged=true \\ 386 | --apiserver-count=3 \\ 387 | --audit-log-maxage=30 \\ 388 | --audit-log-maxbackup=3 \\ 389 | --audit-log-maxsize=100 \\ 390 | --audit-log-path=/var/log/kube-apiserver-audit.log \\ 391 | --event-ttl=1h \\ 392 | --alsologtostderr=true \\ 393 | --logtostderr=false \\ 394 | --log-dir=/var/log/kubernetes \\ 395 | --v=2 396 | Restart=on-failure 397 | RestartSec=5 398 | Type=notify 399 | User=root 400 | LimitNOFILE=65536 401 | 402 | [Install] 403 | WantedBy=multi-user.target 404 | EOF 405 | 406 | for (( i=0; i < 3; i++ )) 407 | do 408 | cd $base_dir/config/Kmaster/Kapi/ 409 | sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-apiserver.service.template > kube-apiserver-${NODE_IPS[i]}.service 410 | done 411 | 412 | for node_ip in ${NODE_IPS[@]} 413 | do 414 | echoGreen ">>> ${node_ip}" 415 | ssh ${node_ip} "mkdir -p /etc/kubernetes/cert/" 416 | scp $base_dir/config/Kmaster/Kapi/kubernetes*.pem ${node_ip}:/etc/kubernetes/cert/ 417 | scp $base_dir/config/Kmaster/Kapi/encryption-config.yaml ${node_ip}:/etc/kubernetes/ 418 | ssh ${node_ip} "mkdir -p /var/log/kubernetes" 419 | scp $base_dir/config/Kmaster/Kapi/kube-apiserver-${node_ip}.service ${node_ip}:/etc/systemd/system/kube-apiserver.service 420 | ssh ${node_ip} "systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver" & 421 | sleep 10 422 | ssh root@${node_ip} "systemctl status kube-apiserver |grep 'Active:'" 423 | done 424 | 425 | sleep 10 426 | kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes 427 | } 428 | 429 | Kmanage(){ 430 | 431 | source /opt/k8s/bin/environment.sh 432 | 433 | cd $base_dir/config/Kmaster/Kmanage/ 434 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 435 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 436 | -config=/etc/kubernetes/cert/ca-config.json \ 437 | -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager 438 | 439 | kubectl config set-cluster kubernetes \ 440 | --certificate-authority=/etc/kubernetes/cert/ca.pem \ 441 | --embed-certs=true \ 442 | --server=${KUBE_APISERVER} \ 443 | 
--kubeconfig=kube-controller-manager.kubeconfig 444 | 445 | kubectl config set-credentials system:kube-controller-manager \ 446 | --client-certificate=kube-controller-manager.pem \ 447 | --client-key=kube-controller-manager-key.pem \ 448 | --embed-certs=true \ 449 | --kubeconfig=kube-controller-manager.kubeconfig 450 | 451 | kubectl config set-context system:kube-controller-manager \ 452 | --cluster=kubernetes \ 453 | --user=system:kube-controller-manager \ 454 | --kubeconfig=kube-controller-manager.kubeconfig 455 | 456 | kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig 457 | 458 | cat > $base_dir/config/Kmaster/Kmanage/kube-controller-manager.service << EOF 459 | [Unit] 460 | Description=Kubernetes Controller Manager 461 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 462 | 463 | [Service] 464 | ExecStart=/opt/k8s/bin/kube-controller-manager \\ 465 | --port=0 \\ 466 | --secure-port=10252 \\ 467 | --bind-address=127.0.0.1 \\ 468 | --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\ 469 | --service-cluster-ip-range=${SERVICE_CIDR} \\ 470 | --cluster-name=kubernetes \\ 471 | --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\ 472 | --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\ 473 | --experimental-cluster-signing-duration=8760h \\ 474 | --root-ca-file=/etc/kubernetes/cert/ca.pem \\ 475 | --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\ 476 | --leader-elect=true \\ 477 | --feature-gates=RotateKubeletServerCertificate=true \\ 478 | --controllers=*,bootstrapsigner,tokencleaner \\ 479 | --horizontal-pod-autoscaler-use-rest-clients=true \\ 480 | --horizontal-pod-autoscaler-sync-period=10s \\ 481 | --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \\ 482 | --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \\ 483 | --use-service-account-credentials=true \\ 484 | --alsologtostderr=true \\ 485 | --logtostderr=false \\ 486 | --log-dir=/var/log/kubernetes \\ 487 | --v=2 488 | Restart=on 489 | Restart=on-failure 490 | RestartSec=5 491 | User=root 492 | 493 | [Install] 494 | WantedBy=multi-user.target 495 | EOF 496 | 497 | source /opt/k8s/bin/environment.sh 498 | for node_ip in ${NODE_IPS[@]} 499 | do 500 | echoGreen ">>> ${node_ip}" 501 | scp $base_dir/config/Kmaster/Kmanage/kube-controller-manager*.pem root@${node_ip}:/etc/kubernetes/cert/ 502 | scp $base_dir/config/Kmaster/Kmanage/kube-controller-manager.kubeconfig root@${node_ip}:/etc/kubernetes/ 503 | scp $base_dir/config/Kmaster/Kmanage/kube-controller-manager.service root@${node_ip}:/etc/systemd/system/ 504 | ssh root@${node_ip} "mkdir -p /var/log/kubernetes" 505 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager" 506 | ssh root@${node_ip} "systemctl status kube-controller-manager|grep Active" 507 | done 508 | } 509 | 510 | Kscheduler(){ 511 | 512 | source /opt/k8s/bin/environment.sh 513 | cd $base_dir/config/Kmaster/Kscheduler/ 514 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 515 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 516 | -config=/etc/kubernetes/cert/ca-config.json \ 517 | -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler 518 | 519 | kubectl config set-cluster kubernetes \ 520 | --certificate-authority=/etc/kubernetes/cert/ca.pem \ 521 | --embed-certs=true \ 522 | --server=${KUBE_APISERVER} \ 523 | --kubeconfig=kube-scheduler.kubeconfig 524 | 525 | 
kubectl config set-credentials system:kube-scheduler \ 526 | --client-certificate=kube-scheduler.pem \ 527 | --client-key=kube-scheduler-key.pem \ 528 | --embed-certs=true \ 529 | --kubeconfig=kube-scheduler.kubeconfig 530 | 531 | kubectl config set-context system:kube-scheduler \ 532 | --cluster=kubernetes \ 533 | --user=system:kube-scheduler \ 534 | --kubeconfig=kube-scheduler.kubeconfig 535 | 536 | kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig 537 | 538 | source /opt/k8s/bin/environment.sh 539 | for node_ip in ${NODE_IPS[@]} 540 | do 541 | echoGreen ">>> ${node_ip}" 542 | scp $base_dir/config/Kmaster/Kscheduler/kube-scheduler.kubeconfig root@${node_ip}:/etc/kubernetes/ 543 | scp $base_dir/config/Kmaster/Kscheduler/kube-scheduler.service root@${node_ip}:/etc/systemd/system/ 544 | ssh root@${node_ip} "mkdir -p /var/log/kubernetes" 545 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler" 546 | ssh root@${node_ip} "systemctl status kube-scheduler|grep Active" 547 | done 548 | } 549 | 550 | echoYellow "现在开始部署高可用组件haproxy & keepalived!" 551 | Kha 552 | sleep 3 553 | echoYellow "现在开始部署kube-apiserver!" 554 | Kapi 555 | sleep 3 556 | echoYellow "现在开始部署kube-controller-manager!" 557 | Kmanage 558 | sleep 3 559 | echoYellow "现在开始部署kube-scheduler!" 560 | Kscheduler 561 | } 562 | 563 | Kwork(){ 564 | 565 | Kdocker(){ 566 | source /opt/k8s/bin/environment.sh 567 | tar xf $base_dir/pack/docker-18.03.1-ce.tgz -C $base_dir/config/Kwork/Kdocker/ 568 | 569 | cat > $base_dir/config/Kwork/Kdocker/docker.service << "EOF" 570 | [Unit] 571 | Description=Docker Application Container Engine 572 | Documentation=http://docs.docker.io 573 | 574 | [Service] 575 | Environment="PATH=/opt/k8s/bin:/bin:/sbin:/usr/bin:/usr/sbin" 576 | EnvironmentFile=-/run/flannel/docker 577 | ExecStart=/opt/k8s/bin/dockerd --log-level=error $DOCKER_NETWORK_OPTIONS 578 | ExecReload=/bin/kill -s HUP $MAINPID 579 | Restart=on-failure 580 | RestartSec=5 581 | LimitNOFILE=infinity 582 | LimitNPROC=infinity 583 | LimitCORE=infinity 584 | Delegate=yes 585 | KillMode=process 586 | 587 | [Install] 588 | WantedBy=multi-user.target 589 | EOF 590 | 591 | source /opt/k8s/bin/environment.sh 592 | for node_ip in ${NODE_IPS[@]} 593 | do 594 | echoGreen ">>> ${node_ip}" 595 | scp $base_dir/config/Kwork/Kdocker/docker/docker* root@${node_ip}:/opt/k8s/bin/ 596 | ssh root@${node_ip} "chmod +x /opt/k8s/bin/*" 597 | scp $base_dir/config/Kwork/Kdocker/docker.service root@${node_ip}:/etc/systemd/system/ 598 | ssh root@${node_ip} "mkdir -p /etc/docker/" 599 | scp $base_dir/config/Kwork/Kdocker/docker-daemon.json root@${node_ip}:/etc/docker/daemon.json 600 | ssh root@${node_ip} "/usr/sbin/iptables -F && /usr/sbin/iptables -X && /usr/sbin/iptables -F -t nat && /usr/sbin/iptables -X -t nat" 601 | ssh root@${node_ip} "/usr/sbin/iptables -P FORWARD ACCEPT" 602 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable docker && systemctl start docker" 603 | #ssh root@${node_ip} 'for intf in /sys/devices/virtual/net/docker0/brif/*; do echo 1 > $intf/hairpin_mode; done' 604 | #ssh root@${node_ip} "sysctl -p /etc/sysctl.d/kubernetes.conf" 605 | ssh root@${node_ip} "systemctl status docker|grep Active" && sleep 10 606 | ssh root@${node_ip} "/usr/sbin/ip addr show flannel.1 && /usr/sbin/ip addr show docker0" 607 | done 608 | } 609 | 610 | Kkubelet(){ 611 | 612 | source /opt/k8s/bin/environment.sh 613 | for node_name in ${NODE_NAMES[@]} 614 | do 615 | echo ">>> 
${node_name}" 616 | cd $base_dir/config/Kwork/Kkubelet/ 617 | # 创建 token 618 | export BOOTSTRAP_TOKEN=$(kubeadm token create \ 619 | --description kubelet-bootstrap-token \ 620 | --groups system:bootstrappers:${node_name} \ 621 | --kubeconfig ~/.kube/config) 622 | 623 | # 设置集群参数 624 | kubectl config set-cluster kubernetes \ 625 | --certificate-authority=/etc/kubernetes/cert/ca.pem \ 626 | --embed-certs=true \ 627 | --server=${KUBE_APISERVER} \ 628 | --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig 629 | 630 | # 设置客户端认证参数 631 | kubectl config set-credentials kubelet-bootstrap \ 632 | --token=${BOOTSTRAP_TOKEN} \ 633 | --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig 634 | 635 | # 设置上下文参数 636 | kubectl config set-context default \ 637 | --cluster=kubernetes \ 638 | --user=kubelet-bootstrap \ 639 | --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig 640 | 641 | # 设置默认上下文 642 | kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig 643 | done 644 | 645 | source /opt/k8s/bin/environment.sh 646 | for node_name in ${NODE_NAMES[@]} 647 | do 648 | echoGreen ">>> ${node_name}" 649 | cd $base_dir/config/Kwork/Kkubelet/ 650 | sed -e "s/##NODE_NAME##/${node_name}/" kubelet.service.template > kubelet-${node_name}.service 651 | scp $base_dir/config/Kwork/Kkubelet/kubelet-${node_name}.service root@${node_name}:/etc/systemd/system/kubelet.service 652 | scp $base_dir/config/Kwork/Kkubelet/kubelet-bootstrap-${node_name}.kubeconfig root@${node_name}:/etc/kubernetes/kubelet-bootstrap.kubeconfig 653 | done 654 | 655 | kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers 656 | 657 | source /opt/k8s/bin/environment.sh 658 | for node_ip in ${NODE_IPS[@]} 659 | do 660 | echoGreen ">>> ${node_ip}" 661 | cd $base_dir/config/Kwork/Kkubelet/ 662 | sed -e "s/##NODE_IP##/${node_ip}/" kubelet.config.json.template > kubelet.config-${node_ip}.json 663 | scp $base_dir/config/Kwork/Kkubelet/kubelet.config-${node_ip}.json root@${node_ip}:/etc/kubernetes/kubelet.config.json 664 | ssh root@${node_ip} "mkdir -p /var/lib/kubelet" 665 | ssh root@${node_ip} "/usr/sbin/swapoff -a" 666 | ssh root@${node_ip} "mkdir -p /var/log/kubernetes" 667 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet" 668 | ssh root@${node_ip} "systemctl status kubelet | grep Active" 669 | done 670 | 671 | kubectl apply -f $base_dir/config/Kwork/Kkubelet/csr-crb.yaml 672 | } 673 | 674 | Kproxy(){ 675 | 676 | source /opt/k8s/bin/environment.sh 677 | cd $base_dir/config/Kwork/Kproxy/ 678 | cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ 679 | -ca-key=/etc/kubernetes/cert/ca-key.pem \ 680 | -config=/etc/kubernetes/cert/ca-config.json \ 681 | -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy 682 | 683 | kubectl config set-cluster kubernetes \ 684 | --certificate-authority=/etc/kubernetes/cert/ca.pem \ 685 | --embed-certs=true \ 686 | --server=${KUBE_APISERVER} \ 687 | --kubeconfig=kube-proxy.kubeconfig 688 | 689 | kubectl config set-credentials kube-proxy \ 690 | --client-certificate=kube-proxy.pem \ 691 | --client-key=kube-proxy-key.pem \ 692 | --embed-certs=true \ 693 | --kubeconfig=kube-proxy.kubeconfig 694 | 695 | kubectl config set-context default \ 696 | --cluster=kubernetes \ 697 | --user=kube-proxy \ 698 | --kubeconfig=kube-proxy.kubeconfig 699 | 700 | kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig 701 | 702 | cat > 
$base_dir/config/Kwork/Kproxy/kube-proxy.config.yaml.template <>> ${NODE_NAMES[i]}" 719 | cd $base_dir/config/Kwork/Kproxy/ 720 | sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-proxy.config.yaml.template > kube-proxy-${NODE_NAMES[i]}.config.yaml 721 | scp $base_dir/config/Kwork/Kproxy/kube-proxy-${NODE_NAMES[i]}.config.yaml root@${NODE_NAMES[i]}:/etc/kubernetes/kube-proxy.config.yaml 722 | done 723 | 724 | source /opt/k8s/bin/environment.sh 725 | for node_name in ${NODE_NAMES[@]} 726 | do 727 | echoGreen ">>> ${node_name}" 728 | scp $base_dir/config/Kwork/Kproxy/kube-proxy.kubeconfig root@${node_name}:/etc/kubernetes/ 729 | scp $base_dir/config/Kwork/Kproxy/kube-proxy.service root@${node_name}:/etc/systemd/system/ 730 | done 731 | 732 | source /opt/k8s/bin/environment.sh 733 | for node_ip in ${NODE_IPS[@]} 734 | do 735 | echoGreen ">>> ${node_ip}" 736 | ssh root@${node_ip} "mkdir -p /var/lib/kube-proxy" 737 | ssh root@${node_ip} "mkdir -p /var/log/kubernetes" 738 | ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl start kube-proxy" 739 | ssh root@${node_ip} "systemctl status kube-proxy|grep Active" 740 | ssh root@${node_ip} "/usr/sbin/ipvsadm -ln" 741 | done 742 | } 743 | echoYellow "现在开始部署docker服务!" 744 | Kdocker 745 | sleep 3 746 | echoYellow "现在开始部署kubelet服务!" 747 | Kkubelet 748 | sleep 3 749 | echoYellow "现在开始部署kube-proxy服务!" 750 | Kproxy 751 | } 752 | 753 | echoYellow "现在开始执行环境初始化工作!" 754 | Kcsh 755 | sleep 2 756 | echoYellow "现在开始配置证书!" 757 | Kzs 758 | sleep 2 759 | echoYellow "现在开始部署kubectl服务!" 760 | Kctl 761 | sleep 2 762 | echoYellow "现在开始部署etcd服务!" 763 | Ketcd 764 | sleep 2 765 | echoYellow "现在开始部署flannel网络服务!" 766 | Knet 767 | sleep 2 768 | echoYellow "现在开始部署master组件!" 769 | Kmaster 770 | sleep 2 771 | echoYellow "现在开始部署work组件!" 772 | Kwork 773 | 774 | echoRed "部署完成,现在可以享用k8s高可用集群各个功能了!" 775 | --------------------------------------------------------------------------------
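A final note on the shipped secrets: config/Kmaster/Kapi/encryption-config.yaml contains a hard-coded `aescbc` key, and magic.sh copies that file to the masters as-is, so the `ENCRYPTION_KEY` generated in config/environment.sh never ends up in it. If you prefer not to reuse the key that is checked into the repository, a minimal sketch (assuming GNU sed and the paths used in this repo) of regenerating it before running magic.sh:

```
# generate a fresh 32-byte key and write it into the encryption config
source config/environment.sh        # exports a new ENCRYPTION_KEY
sed -i "s|secret: .*|secret: ${ENCRYPTION_KEY}|" config/Kmaster/Kapi/encryption-config.yaml
grep 'secret:' config/Kmaster/Kapi/encryption-config.yaml   # confirm the replacement
```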