├── .gitignore
├── scripts
│   ├── initial_woker.sh
│   ├── kubeadm-config.yaml
│   ├── setup_hostname.sh
│   ├── install_localRepo.sh
│   ├── loadimage.sh
│   ├── install_kubernetes.sh
│   ├── install_docker.sh
│   ├── initial_master.sh
│   └── calico.yaml
├── plugins
│   ├── dashboard-auth.yaml
│   ├── kuboard.yaml
│   ├── metrics-server-v0.3.6.yaml
│   ├── ingress-nginx-v0.29.0.yaml
│   ├── dashboard-v2.0.0-rc5.yaml
│   └── calico-v3.10.3.yaml
├── README.md
├── repos
│   └── CentOS-Media.repo
├── setup_master.sh
├── gpg
│   ├── Aliyun-kubernetes-rpm-package-key.gpg
│   ├── Docker.gpg
│   └── Aliyun-kubernetes-yum-key.gpg
└── setup_worker.sh

/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | 
--------------------------------------------------------------------------------
/scripts/initial_woker.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # Load images
 4 | . ./loadimage.sh
 5 | 
 6 | echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts
 7 | 
 8 | kubeadm join ${APISERVER_NAME}:${APISERVER_PORT} \
 9 |     --token ${TOKEN} \
10 |     --discovery-token-ca-cert-hash ${CERT}
11 | 
--------------------------------------------------------------------------------
/scripts/kubeadm-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kubeadm.k8s.io/v1beta2
2 | kind: ClusterConfiguration
3 | kubernetesVersion: v1.17.1
4 | controlPlaneEndpoint: "apiserver.k8s.com:6443"
5 | networking:
6 |   serviceSubnet: "10.96.0.0/16"
7 |   podSubnet: "10.11.10.0/16"
8 |   dnsDomain: "cluster.local"
9 | 
--------------------------------------------------------------------------------
/scripts/setup_hostname.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | if [ !
-n "$1" ] ; then 4 | echo -e "\033[31;1mError:请传入hostname \033[0m" 5 | echo 用法 setup_hostname yourname 6 | exit 1 7 | fi 8 | 9 | # 修改 hostname, 10 | hostnamectl set-hostname ${1} 11 | # 查看修改结果 12 | hostnamectl status 13 | 14 | # 设置 hostname 解析,这个命令需要root账户才能执行。 15 | echo "127.0.0.1 $(hostname)" >> /etc/hosts -------------------------------------------------------------------------------- /plugins/dashboard-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kubernetes-dashboard 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRoleBinding 10 | metadata: 11 | name: admin-user 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: cluster-admin 16 | subjects: 17 | - kind: ServiceAccount 18 | name: admin-user 19 | namespace: kubernetes-dashboard -------------------------------------------------------------------------------- /scripts/install_localRepo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rpm -ivh ../packages/deltarpm-3.6-3.el7.x86_64.rpm 4 | rpm -ivh ../packages/libxml2-python-2.9.1-6.el7_2.3.x86_64.rpm 5 | rpm -ivh ../packages/python-deltarpm-3.6-3.el7.x86_64.rpm 6 | rpm -ivh ../packages/createrepo-0.9.9-28.el7.noarch.rpm 7 | 8 | createrepo /root/k8sOfflineSetup/packages 9 | 10 | # 备份现有源 11 | if [ -f "/etc/yum.repos.d/CentOS-Base.repo" ];then 12 | mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak_$(date "+%Y-%m-%d_%H-%M-%S") 13 | fi 14 | cp -f ../repos/CentOS-Media.repo /etc/yum.repos.d/ 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 何为离线安装Kubernetes 2 | 3 | 在很多企业内网中没有互联网链接,要部署Kubernetes非常的不方便,这里我制作了Kubernetes离线资源包,配套制作和安装教程,可以非常方便的在内网中部署Kubernetes群集。 4 | 5 | ## 如何制作离线资源包 6 | 7 | 请看教程: 8 | 9 | 下载yum包和docker image,分别放到`packages`和`images`两个目录中再与本仓库合并。制作完的目录结构应该如下: 10 | 11 | ```text 12 | 🗜️k8sOfflineSetup.tar.gz 13 | ├── 📁gpg 14 | ├── 📁plugins 15 | ├── 📁repos 16 | ├── 📁scripts 17 | ├── 📁packages 18 | ├── 📁images 19 | ├── 📃setup_master.sh 20 | └── 📃setup_worker.sh 21 | ``` 22 | 23 | ## 如何使用离线资源包 24 | 25 | - [离线安装Kubernetes v1.17.1 - 准备离线资源包](https://www.jianshu.com/p/93e7cb4da070) 26 | - [离线安装Kubernetes v1.17.1 - 离线部署](https://www.jianshu.com/p/fd9f1076ea2d) 27 | -------------------------------------------------------------------------------- /repos/CentOS-Media.repo: -------------------------------------------------------------------------------- 1 | # CentOS-Media.repo 2 | # 3 | # This repo can be used with mounted DVD media, verify the mount point for 4 | # CentOS-7. You can use this repo and yum to install items directly off the 5 | # DVD ISO that we release. 
6 | # 7 | # To use this repo, put in your DVD and use it with the other repos too: 8 | # yum --enablerepo=c7-media [command] 9 | # 10 | # or for ONLY the media repo, do this: 11 | # 12 | # yum --disablerepo=\* --enablerepo=c7-media [command] 13 | 14 | [c7-media] 15 | name=CentOS-$releasever - Media 16 | baseurl=file:///root/k8sOfflineSetup/packages/ 17 | gpgcheck=1 18 | enabled=1 19 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7 20 | file:///root/k8sOfflineSetup/gpg/Docker.gpg 21 | file:///root/k8sOfflineSetup/gpg/Aliyun-kubernetes-yum-key.gpg 22 | file:///root/k8sOfflineSetup/gpg/Aliyun-kubernetes-rpm-package-key.gpg 23 | 24 | 25 | -------------------------------------------------------------------------------- /setup_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set +e 4 | 5 | usage() { 6 | echo "使用说明:" 7 | echo "导入以下参数再执行setup_master" 8 | echo "export HOSTNAME=k8s-master" 9 | echo "export APISERVER_NAME=apiserver.k8s.com" 10 | echo "export MASTER_IP=192.168.1.30" 11 | echo "export POD_SUBNET=10.11.10.0/16" 12 | echo "" 13 | } 14 | 15 | printEnvriment() { 16 | echo "已设置环境变量参数:" 17 | echo "HOSTNAME="${HOSTNAME} 18 | echo "APISERVER_NAME="${APISERVER_NAME} 19 | echo "MASTER_IP="${MASTER_IP} 20 | echo "POD_SUBNET="${POD_SUBNET} 21 | echo "" 22 | } 23 | 24 | if [ ${#HOSTNAME} -eq 0 ] || [ ${#APISERVER_NAME} -eq 0 ] || [ ${#MASTER_IP} -eq 0 ] || [ ${#POD_SUBNET} -eq 0 ] ; then 25 | usage 26 | 27 | echo -e "\033[31;1m缺少环境变量参数 \033[0m" 28 | printEnvriment 29 | exit 1 30 | fi 31 | 32 | cd ./scripts/ 33 | source ./setup_hostname.sh ${HOSTNAME} 34 | source ./install_localRepo.sh 35 | source ./install_docker.sh 36 | source ./install_kubernetes.sh 37 | source ./initial_master.sh 38 | cd .. 
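For reference, a typical control-plane run with this package might look like the sketch below. The variable values are the illustrative ones from the usage() hints in setup_master.sh, not real cluster data, and the script is assumed to be run as root from the unpacked k8sOfflineSetup directory, since it uses relative paths into ./scripts/.

```bash
# Illustrative values only (taken from the usage() hints in setup_master.sh).
export HOSTNAME=k8s-master
export APISERVER_NAME=apiserver.k8s.com
export MASTER_IP=192.168.1.30
export POD_SUBNET=10.11.10.0/16

# Run as root from the repository root.
bash setup_master.sh
```

When `kubeadm init` finishes, it prints a `kubeadm join` command containing the token and CA certificate hash that the worker-side script expects.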
39 | -------------------------------------------------------------------------------- /gpg/Aliyun-kubernetes-rpm-package-key.gpg: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1 3 | 4 | mQENBFWKtqgBCADmKQWYQF9YoPxLEQZ5XA6DFVg9ZHG4HIuehsSJETMPQ+W9K5c5 5 | Us5assCZBjG/k5i62SmWb09eHtWsbbEgexURBWJ7IxA8kM3kpTo7bx+LqySDsSC3 6 | /8JRkiyibVV0dDNv/EzRQsGDxmk5Xl8SbQJ/C2ECSUT2ok225f079m2VJsUGHG+5 7 | RpyHHgoMaRNedYP8ksYBPSD6sA3Xqpsh/0cF4sm8QtmsxkBmCCIjBa0B0LybDtdX 8 | XIq5kPJsIrC2zvERIPm1ez/9FyGmZKEFnBGeFC45z5U//pHdB1z03dYKGrKdDpID 9 | 17kNbC5wl24k/IeYyTY9IutMXvuNbVSXaVtRABEBAAG0Okdvb2dsZSBDbG91ZCBQ 10 | YWNrYWdlcyBSUE0gU2lnbmluZyBLZXkgPGdjLXRlYW1AZ29vZ2xlLmNvbT6JATgE 11 | EwECACIFAlWKtqgCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPCcOUw+ 12 | G6jV+QwH/0wRH+XovIwLGfkg6kYLEvNPvOIYNQWnrT6zZ+XcV47WkJ+i5SR+QpUI 13 | udMSWVf4nkv+XVHruxydafRIeocaXY0E8EuIHGBSB2KR3HxG6JbgUiWlCVRNt4Qd 14 | 6udC6Ep7maKEIpO40M8UHRuKrp4iLGIhPm3ELGO6uc8rks8qOBMH4ozU+3PB9a0b 15 | GnPBEsZdOBI1phyftLyyuEvG8PeUYD+uzSx8jp9xbMg66gQRMP9XGzcCkD+b8w1o 16 | 7v3J3juKKpgvx5Lqwvwv2ywqn/Wr5d5OBCHEw8KtU/tfxycz/oo6XUIshgEbS/+P 17 | 6yKDuYhRp6qxrYXjmAszIT25cftb4d4= 18 | =/PbX 19 | -----END PGP PUBLIC KEY BLOCK----- 20 | -------------------------------------------------------------------------------- /scripts/loadimage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 导入镜像 4 | # kubernetes 5 | docker load -i ../images/kube-controller-manager-v1.17.1.tar 6 | docker load -i ../images/kube-apiserver-v1.17.1.tar 7 | docker load -i ../images/kube-scheduler-v1.17.1.tar 8 | docker load -i ../images/kube-proxy-v1.17.1.tar 9 | docker load -i ../images/coredns-1.6.5.tar 10 | docker load -i ../images/etcd-3.4.3-0.tar 11 | docker load -i ../images/pause-3.1.tar 12 | 13 | #calico 网络插件 14 | docker load -i ../images/calico-cni-v3.10.3.tar 15 | docker load -i ../images/calico-pod2daemon-flexvol-v3.10.3.tar 16 | docker load -i ../images/calico-node-v3.10.3.tar 17 | docker load -i ../images/calico-kube-controllers-v3.10.3.tar 18 | 19 | # nginx ingress controller 20 | docker load -i ../images/nginx-ingress-controller-0.29.0.tar 21 | 22 | # kubernetes dashboard 23 | docker load -i ../images/kubernetesui-dashboard-v2.0.0-rc5.tar 24 | docker load -i ../images/kubernetesui-metrics-scraper-v1.0.3.tar 25 | 26 | # kuboard 27 | docker load -i ../images/kuboard-latest.tar 28 | docker load -i ../images/kuboard-metrics-server-amd64-v0.3.6.tar -------------------------------------------------------------------------------- /scripts/install_kubernetes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 卸载旧版本 4 | yum remove -y kubelet kubeadm kubectl 5 | 6 | # 安装kubelet、kubeadm、kubectl 7 | yum install -y kubelet-1.17.1 kubeadm-1.17.1 kubectl-1.17.1 8 | 9 | # 修改docker Cgroup Driver为systemd 10 | # # 将/usr/lib/systemd/system/docker.service文件中的这一行 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock 11 | # # 修改为 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd 12 | # 如果不修改,在添加 worker 节点时可能会碰到如下错误 13 | # [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
14 | # Please follow the guide at https://kubernetes.io/docs/setup/cri/ 15 | sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service 16 | 17 | # 设置 docker 镜像,提高 docker 镜像下载速度和稳定性 18 | # 如果您访问 https://hub.docker.io 速度非常稳定,亦可以跳过这个步骤 19 | # curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io 20 | 21 | # 重启 docker,并启动 kubelet 22 | systemctl daemon-reload 23 | systemctl restart docker 24 | systemctl enable kubelet && systemctl start kubelet 25 | kubectl version -------------------------------------------------------------------------------- /setup_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set +e 4 | 5 | usage() { 6 | echo "使用说明:" 7 | echo "导入以下参数再执行setup_worker" 8 | echo "export HOSTNAME=k8s-worker1" 9 | echo "export APISERVER_NAME=apiserver.k8s.com" 10 | echo "export APISERVER_PORT=6443 默认:6443" 11 | echo "export MASTER_IP=192.168.1.30" 12 | echo "export TOKEN=edz861.g6eci15slbefibj8" 13 | echo "export CERT=sha256:23a7e1b4..." 14 | echo "" 15 | } 16 | 17 | ensureApiServerPort() { 18 | if [ ${#APISERVER_PORT} -eq 0 ]; then 19 | # echo "set default port" 20 | export APISERVER_PORT=6443 21 | fi 22 | } 23 | 24 | printEnvriment() { 25 | echo "已设置环境变量参数:" 26 | echo "HOSTNAME="${HOSTNAME} 27 | echo "APISERVER_NAME="${APISERVER_NAME} 28 | echo "APISERVER_PORT="${APISERVER_PORT} 29 | echo "MASTER_IP="${MASTER_IP} 30 | echo "TOKEN="${TOKEN} 31 | echo "CERT="${CERT} 32 | echo "" 33 | } 34 | 35 | ensureApiServerPort 36 | 37 | if [ ${#HOSTNAME} -eq 0 ] || [ ${#APISERVER_NAME} -eq 0 ] || [ ${#APISERVER_PORT} -eq 0 ] || [ ${#MASTER_IP} -eq 0 ] || [ ${#TOKEN} -eq 0 ] || [ ${#CERT} -eq 0 ]; then 38 | usage 39 | 40 | echo -e "\033[31;1m缺少环境变量参数 \033[0m" 41 | printEnvriment 42 | exit 1 43 | fi 44 | 45 | cd ./scripts/ 46 | source ./setup_hostname.sh ${HOSTNAME} 47 | source ./install_localRepo.sh 48 | source ./install_docker.sh 49 | source ./install_kubernetes.sh 50 | source ./initial_woker.sh 51 | cd .. 
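A worker join under the same assumptions is sketched below. TOKEN and CERT are not generated here: use the token and sha256 discovery hash printed by `kubeadm init` on the master, or regenerate them there with `kubeadm token create --print-join-command`. The values shown are the placeholders from usage().

```bash
# Illustrative placeholders only (from the usage() hints in setup_worker.sh);
# substitute the token and cert hash printed on the master node.
export HOSTNAME=k8s-worker1
export APISERVER_NAME=apiserver.k8s.com
export APISERVER_PORT=6443   # optional, defaults to 6443
export MASTER_IP=192.168.1.30
export TOKEN=edz861.g6eci15slbefibj8
export CERT=sha256:23a7e1b4...

# Run as root from the repository root.
bash setup_worker.sh
```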
52 | -------------------------------------------------------------------------------- /scripts/install_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 在 master 节点和 worker 节点都要执行 4 | 5 | # 安装 docker 6 | # 参考文档如下 7 | # https://docs.docker.com/install/linux/docker-ce/centos/ 8 | # https://docs.docker.com/install/linux/linux-postinstall/ 9 | 10 | # 卸载旧版本 11 | yum remove docker \ 12 | docker-client \ 13 | docker-client-latest \ 14 | docker-common \ 15 | docker-latest \ 16 | docker-latest-logrotate \ 17 | docker-logrotate \ 18 | docker-engine 19 | 20 | # 安装并启动 docker 21 | yum install -y docker-ce-19.03.5 docker-ce-cli-19.03.5 containerd.io 22 | systemctl enable docker 23 | systemctl start docker 24 | 25 | # 关闭 防火墙 26 | systemctl stop firewalld 27 | systemctl disable firewalld 28 | 29 | # 关闭 SeLinux 30 | setenforce 0 31 | sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config 32 | 33 | # 关闭 swap 34 | swapoff -a 35 | yes | cp /etc/fstab /etc/fstab_bak 36 | cat /etc/fstab_bak |grep -v swap > /etc/fstab 37 | 38 | # 修改 /etc/sysctl.conf 39 | # 如果有配置,则修改 40 | sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf 41 | sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf 42 | sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf 43 | # 可能没有,追加 44 | echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf 45 | echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf 46 | echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf 47 | # 执行命令以应用 48 | sysctl -p 49 | 50 | docker version -------------------------------------------------------------------------------- /gpg/Docker.gpg: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | mQINBFit5IEBEADDt86QpYKz5flnCsOyZ/fk3WwBKxfDjwHf/GIflo+4GWAXS7wJ 4 | 1PSzPsvSDATV10J44i5WQzh99q+lZvFCVRFiNhRmlmcXG+rk1QmDh3fsCCj9Q/yP 5 | w8jn3Hx0zDtz8PIB/18ReftYJzUo34COLiHn8WiY20uGCF2pjdPgfxE+K454c4G7 6 | gKFqVUFYgPug2CS0quaBB5b0rpFUdzTeI5RCStd27nHCpuSDCvRYAfdv+4Y1yiVh 7 | KKdoe3Smj+RnXeVMgDxtH9FJibZ3DK7WnMN2yeob6VqXox+FvKYJCCLkbQgQmE50 8 | uVK0uN71A1mQDcTRKQ2q3fFGlMTqJbbzr3LwnCBE6hV0a36t+DABtZTmz5O69xdJ 9 | WGdBeePCnWVqtDb/BdEYz7hPKskcZBarygCCe2Xi7sZieoFZuq6ltPoCsdfEdfbO 10 | +VBVKJnExqNZCcFUTEnbH4CldWROOzMS8BGUlkGpa59Sl1t0QcmWlw1EbkeMQNrN 11 | spdR8lobcdNS9bpAJQqSHRZh3cAM9mA3Yq/bssUS/P2quRXLjJ9mIv3dky9C3udM 12 | +q2unvnbNpPtIUly76FJ3s8g8sHeOnmYcKqNGqHq2Q3kMdA2eIbI0MqfOIo2+Xk0 13 | rNt3ctq3g+cQiorcN3rdHPsTRSAcp+NCz1QF9TwXYtH1XV24A6QMO0+CZwARAQAB 14 | tCtEb2NrZXIgUmVsZWFzZSAoQ0UgcnBtKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3 15 | BBMBCgAhBQJYrep4AhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEMUv62ti 16 | Hp816C0P/iP+1uhSa6Qq3TIc5sIFE5JHxOO6y0R97cUdAmCbEqBiJHUPNQDQaaRG 17 | VYBm0K013Q1gcJeUJvS32gthmIvhkstw7KTodwOM8Kl11CCqZ07NPFef1b2SaJ7l 18 | TYpyUsT9+e343ph+O4C1oUQw6flaAJe+8ATCmI/4KxfhIjD2a/Q1voR5tUIxfexC 19 | /LZTx05gyf2mAgEWlRm/cGTStNfqDN1uoKMlV+WFuB1j2oTUuO1/dr8mL+FgZAM3 20 | ntWFo9gQCllNV9ahYOON2gkoZoNuPUnHsf4Bj6BQJnIXbAhMk9H2sZzwUi9bgObZ 21 | XO8+OrP4D4B9kCAKqqaQqA+O46LzO2vhN74lm/Fy6PumHuviqDBdN+HgtRPMUuao 22 | xnuVJSvBu9sPdgT/pR1N9u/KnfAnnLtR6g+fx4mWz+ts/riB/KRHzXd+44jGKZra 23 | IhTMfniguMJNsyEOO0AN8Tqcl0eRBxcOArcri7xu8HFvvl+e+ILymu4buusbYEVL 24 | GBkYP5YMmScfKn+jnDVN4mWoN1Bq2yMhMGx6PA3hOvzPNsUoYy2BwDxNZyflzuAi 25 | g59mgJm2NXtzNbSRJbMamKpQ69mzLWGdFNsRd4aH7PT7uPAURaf7B5BVp3UyjERW 26 | 
5alSGnBqsZmvlRnVH5BDUhYsWZMPRQS9rRr4iGW0l+TH+O2VJ8aQ 27 | =0Zqq 28 | -----END PGP PUBLIC KEY BLOCK----- 29 | -------------------------------------------------------------------------------- /scripts/initial_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # 导入镜像 6 | . ./loadimage.sh 7 | 8 | 9 | if [ ${#POD_SUBNET} -eq 0 ] || [ ${#APISERVER_NAME} -eq 0 ]; then 10 | echo -e "\033[31;1m请确保您已经设置了环境变量 POD_SUBNET 和 APISERVER_NAME \033[0m" 11 | echo 当前POD_SUBNET=$POD_SUBNET 12 | echo 当前APISERVER_NAME=$APISERVER_NAME 13 | exit 1 14 | fi 15 | 16 | echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts 17 | 18 | # 查看完整配置选项 https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 19 | rm -f ./kubeadm-config.yaml 20 | cat < ./kubeadm-config.yaml 21 | apiVersion: kubeadm.k8s.io/v1beta2 22 | kind: ClusterConfiguration 23 | kubernetesVersion: v1.17.1 24 | controlPlaneEndpoint: "${APISERVER_NAME}:6443" 25 | networking: 26 | serviceSubnet: "10.96.0.0/16" 27 | podSubnet: "${POD_SUBNET}" 28 | dnsDomain: "cluster.local" 29 | EOF 30 | 31 | # kubeadm init 32 | # 根据您服务器网速的情况,您需要等候 3 - 10 分钟 33 | kubeadm init --config=kubeadm-config.yaml --upload-certs 34 | 35 | # 配置 kubectl 36 | rm -rf /root/.kube/ 37 | mkdir /root/.kube/ 38 | cp -i /etc/kubernetes/admin.conf /root/.kube/config 39 | 40 | # 安装 calico 网络插件 41 | # 参考文档 https://docs.projectcalico.org/v3.10/getting-started/kubernetes/ 42 | echo "安装calico-3.10.3" 43 | rm -f calico.yaml 44 | cp ../plugins/calico-v3.10.3.yaml ./calico.yaml 45 | sed -i "s#192\.168\.0\.0/16#${POD_SUBNET}#" calico.yaml 46 | kubectl apply -f calico.yaml 47 | 48 | # 安装 nginx ingress controll 49 | echo "安装nginx ingress controll" 50 | kubectl apply -f ../plugins/ingress-nginx-v0.29.0.yaml 51 | 52 | # 安装 Dashboard 53 | echo "安装 Dashboard" 54 | kubectl apply -f ../plugins/dashboard-auth.yaml 55 | kubectl apply -f ../plugins/dashboard-v2.0.0-rc5.yaml 56 | 57 | # 安装 Kuboard 58 | echo "安装 Kuboard" 59 | kubectl apply -f ../plugins/kuboard.yaml 60 | kubectl apply -f ../plugins/metrics-server-v0.3.6.yaml 61 | -------------------------------------------------------------------------------- /gpg/Aliyun-kubernetes-yum-key.gpg: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1 3 | 4 | mQENBFUd6rIBCAD6mhKRHDn3UrCeLDp7U5IE7AhhrOCPpqGF7mfTemZYHf/5Jdjx 5 | cOxoSFlK7zwmFr3lVqJ+tJ9L1wd1K6P7RrtaNwCiZyeNPf/Y86AJ5NJwBe0VD0xH 6 | TXzPNTqRSByVYtdN94NoltXUYFAAPZYQls0x0nUD1hLMlOlC2HdTPrD1PMCnYq/N 7 | uL/Vk8sWrcUt4DIS+0RDQ8tKKe5PSV0+PnmaJvdF5CKawhh0qGTklS2MXTyKFoqj 8 | XgYDfY2EodI9ogT/LGr9Lm/+u4OFPvmN9VN6UG+s0DgJjWvpbmuHL/ZIRwMEn/tp 9 | uneaLTO7h1dCrXC849PiJ8wSkGzBnuJQUbXnABEBAAG0QEdvb2dsZSBDbG91ZCBQ 10 | YWNrYWdlcyBBdXRvbWF0aWMgU2lnbmluZyBLZXkgPGdjLXRlYW1AZ29vZ2xlLmNv 11 | bT6JAT4EEwECACgFAlUd6rICGy8FCQWjmoAGCwkIBwMCBhUIAgkKCwQWAgMBAh4B 12 | AheAAAoJEDdGwginMXsPcLcIAKi2yNhJMbu4zWQ2tM/rJFovazcY28MF2rDWGOnc 13 | 9giHXOH0/BoMBcd8rw0lgjmOosBdM2JT0HWZIxC/Gdt7NSRA0WOlJe04u82/o3OH 14 | WDgTdm9MS42noSP0mvNzNALBbQnlZHU0kvt3sV1YsnrxljoIuvxKWLLwren/GVsh 15 | FLPwONjw3f9Fan6GWxJyn/dkX3OSUGaduzcygw51vksBQiUZLCD2Tlxyr9NvkZYT 16 | qiaWW78L6regvATsLc9L/dQUiSMQZIK6NglmHE+cuSaoK0H4ruNKeTiQUw/EGFaL 17 | ecay6Qy/s3Hk7K0QLd+gl0hZ1w1VzIeXLo2BRlqnjOYFX4CZAQ0EWsFo2wEIAOsX 18 | XwoJuxmWjg2MC9V5xMEKenpZwFAnmhKHv4T3yNf1jOdQKs2uCZ4JwIxS9MNEPF9N 19 | oMnJtoe6B9trjeeqGRs2knjthewhr5gvp4QT16ZKZC2OtJYiJj7ZgljCwOCyByQX 20 | 
d26qRvTY50FCWHohsc+hcHof/9vU+BliyiYH7zjVdbUtIk9iVhsitZ/AN9C+2QVA 21 | j3Svo2SdVNCWmpCHkYs1Y1ipE2sZA+awH42tRiuSXWdS3UtEa76sJ7htJpKY1vAo 22 | xAqRE4TiROIHvYM+TvMfgubS6jRgUVYbiqwwi6oSKEn/0o1fwZgGv61aDIuiguWx 23 | 0reX7h1Wp3xyOQkzUTEAEQEAAbRAR29vZ2xlIENsb3VkIFBhY2thZ2VzIEF1dG9t 24 | YXRpYyBTaWduaW5nIEtleSA8Z2MtdGVhbUBnb29nbGUuY29tPokBPgQTAQIAKAUC 25 | WsFo2wIbLwUJBaOagAYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQagMLIboH 26 | 9Pvx7wf/VYfYs3+dU2GblNLVVgkbwH4hbzNLgGrKjPEL2IkAmpkhUdeXyDxr8e6z 27 | xF9dHtydgdyDyyNJol9CGo71Fsqd9+K5CAaurBDG4LaMFroz9ArN6NN4/QyCLrun 28 | Kssk1asUjvVGGuK1BmbNNnY+hbF+/pv5O/m/Ss9ob663Unjumf6RiC1Rop2wnPW6 29 | aLofMroBpwN/QLQKSwl0obsw5axlwHjF47Eli7Lo247opx0TPz9fIRSMi4g6WFhN 30 | 3SEfwT9IQFtdd+3v9UFALnA2rjSLM+L7pYUr97U7jYMinNDvj2iBhDV6h17E82Ev 31 | N6QpHdeEas1cn3mvko7XRWuwsU13wg== 32 | =4CNh 33 | -----END PGP PUBLIC KEY BLOCK----- 34 | -------------------------------------------------------------------------------- /plugins/kuboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kuboard 5 | namespace: kube-system 6 | annotations: 7 | k8s.eip.work/displayName: kuboard 8 | k8s.eip.work/ingress: "true" 9 | k8s.eip.work/service: NodePort 10 | k8s.eip.work/workload: kuboard 11 | labels: 12 | k8s.eip.work/layer: monitor 13 | k8s.eip.work/name: kuboard 14 | spec: 15 | replicas: 1 16 | selector: 17 | matchLabels: 18 | k8s.eip.work/layer: monitor 19 | k8s.eip.work/name: kuboard 20 | template: 21 | metadata: 22 | labels: 23 | k8s.eip.work/layer: monitor 24 | k8s.eip.work/name: kuboard 25 | spec: 26 | containers: 27 | - name: kuboard 28 | image: eipwork/kuboard:latest 29 | imagePullPolicy: IfNotPresent 30 | tolerations: 31 | - key: node-role.kubernetes.io/master 32 | effect: NoSchedule 33 | 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: kuboard 39 | namespace: kube-system 40 | spec: 41 | type: NodePort 42 | ports: 43 | - name: http 44 | port: 80 45 | targetPort: 80 46 | nodePort: 32567 47 | selector: 48 | k8s.eip.work/layer: monitor 49 | k8s.eip.work/name: kuboard 50 | 51 | --- 52 | apiVersion: v1 53 | kind: ServiceAccount 54 | metadata: 55 | name: kuboard-user 56 | namespace: kube-system 57 | 58 | --- 59 | apiVersion: rbac.authorization.k8s.io/v1 60 | kind: ClusterRoleBinding 61 | metadata: 62 | name: kuboard-user 63 | roleRef: 64 | apiGroup: rbac.authorization.k8s.io 65 | kind: ClusterRole 66 | name: cluster-admin 67 | subjects: 68 | - kind: ServiceAccount 69 | name: kuboard-user 70 | namespace: kube-system 71 | 72 | --- 73 | apiVersion: v1 74 | kind: ServiceAccount 75 | metadata: 76 | name: kuboard-viewer 77 | namespace: kube-system 78 | 79 | --- 80 | apiVersion: rbac.authorization.k8s.io/v1 81 | kind: ClusterRoleBinding 82 | metadata: 83 | name: kuboard-viewer 84 | roleRef: 85 | apiGroup: rbac.authorization.k8s.io 86 | kind: ClusterRole 87 | name: view 88 | subjects: 89 | - kind: ServiceAccount 90 | name: kuboard-viewer 91 | namespace: kube-system 92 | 93 | --- 94 | apiVersion: rbac.authorization.k8s.io/v1 95 | kind: ClusterRoleBinding 96 | metadata: 97 | name: kuboard-viewer:kuboard-minimum-role 98 | roleRef: 99 | apiGroup: rbac.authorization.k8s.io 100 | kind: ClusterRole 101 | name: kuboard-minimum-role 102 | subjects: 103 | - kind: ServiceAccount 104 | name: kuboard-viewer 105 | namespace: kube-system 106 | 107 | --- 108 | apiVersion: rbac.authorization.k8s.io/v1 109 | kind: ClusterRole 110 | metadata: 111 | name: kuboard-minimum-role 112 | rules: 
113 | - apiGroups: 114 | - '' 115 | resources: 116 | - 'namespaces' 117 | - 'nodes' 118 | verbs: 119 | - 'list' 120 | 121 | --- 122 | apiVersion: extensions/v1beta1 123 | kind: Ingress 124 | metadata: 125 | name: kuboard 126 | namespace: kube-system 127 | annotations: 128 | k8s.eip.work/displayName: kuboard 129 | k8s.eip.work/workload: kuboard 130 | nginx.org/websocket-services: "kuboard" 131 | nginx.com/sticky-cookie-services: "serviceName=kuboard srv_id expires=1h path=/" 132 | spec: 133 | rules: 134 | - host: kuboard.yourdomain.com 135 | http: 136 | paths: 137 | - path: / 138 | backend: 139 | serviceName: kuboard 140 | servicePort: http 141 | -------------------------------------------------------------------------------- /plugins/metrics-server-v0.3.6.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:aggregated-metrics-reader 6 | labels: 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 9 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 10 | rules: 11 | - apiGroups: ["metrics.k8s.io"] 12 | resources: ["pods", "nodes"] 13 | verbs: ["get", "list", "watch"] 14 | 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRoleBinding 18 | metadata: 19 | name: metrics-server:system:auth-delegator 20 | roleRef: 21 | apiGroup: rbac.authorization.k8s.io 22 | kind: ClusterRole 23 | name: system:auth-delegator 24 | subjects: 25 | - kind: ServiceAccount 26 | name: metrics-server 27 | namespace: kube-system 28 | 29 | --- 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: RoleBinding 32 | metadata: 33 | name: metrics-server-auth-reader 34 | namespace: kube-system 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: Role 38 | name: extension-apiserver-authentication-reader 39 | subjects: 40 | - kind: ServiceAccount 41 | name: metrics-server 42 | namespace: kube-system 43 | 44 | --- 45 | apiVersion: rbac.authorization.k8s.io/v1 46 | kind: ClusterRole 47 | metadata: 48 | name: system:metrics-server 49 | rules: 50 | - apiGroups: 51 | - "" 52 | resources: 53 | - pods 54 | - nodes 55 | - nodes/stats 56 | - namespaces 57 | verbs: 58 | - get 59 | - list 60 | - watch 61 | 62 | --- 63 | apiVersion: rbac.authorization.k8s.io/v1 64 | kind: ClusterRoleBinding 65 | metadata: 66 | name: system:metrics-server 67 | roleRef: 68 | apiGroup: rbac.authorization.k8s.io 69 | kind: ClusterRole 70 | name: system:metrics-server 71 | subjects: 72 | - kind: ServiceAccount 73 | name: metrics-server 74 | namespace: kube-system 75 | 76 | --- 77 | apiVersion: apiregistration.k8s.io/v1beta1 78 | kind: APIService 79 | metadata: 80 | name: v1beta1.metrics.k8s.io 81 | spec: 82 | service: 83 | name: metrics-server 84 | namespace: kube-system 85 | group: metrics.k8s.io 86 | version: v1beta1 87 | insecureSkipTLSVerify: true 88 | groupPriorityMinimum: 100 89 | versionPriority: 100 90 | 91 | --- 92 | apiVersion: v1 93 | kind: ServiceAccount 94 | metadata: 95 | name: metrics-server 96 | namespace: kube-system 97 | --- 98 | apiVersion: apps/v1 99 | kind: Deployment 100 | metadata: 101 | name: metrics-server 102 | namespace: kube-system 103 | labels: 104 | k8s-app: metrics-server 105 | spec: 106 | selector: 107 | matchLabels: 108 | k8s-app: metrics-server 109 | template: 110 | metadata: 111 | name: metrics-server 112 | labels: 113 | k8s-app: metrics-server 114 | spec: 115 | serviceAccountName: 
metrics-server 116 | volumes: 117 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 118 | - name: tmp-dir 119 | emptyDir: {} 120 | containers: 121 | - name: metrics-server 122 | image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6 123 | # command: 124 | # - /metrics-server 125 | # - --kubelet-insecure-tls 126 | # - --kubelet-preferred-address-types=InternalIP 127 | args: 128 | - --cert-dir=/tmp 129 | - --secure-port=4443 130 | - --kubelet-insecure-tls=true 131 | - --kubelet-preferred-address-types=InternalIP 132 | ports: 133 | - name: main-port 134 | containerPort: 4443 135 | protocol: TCP 136 | securityContext: 137 | readOnlyRootFilesystem: true 138 | runAsNonRoot: true 139 | runAsUser: 1000 140 | imagePullPolicy: IfNotPresent 141 | volumeMounts: 142 | - name: tmp-dir 143 | mountPath: /tmp 144 | nodeSelector: 145 | beta.kubernetes.io/os: linux 146 | 147 | --- 148 | apiVersion: v1 149 | kind: Service 150 | metadata: 151 | name: metrics-server 152 | namespace: kube-system 153 | labels: 154 | kubernetes.io/name: "Metrics-server" 155 | kubernetes.io/cluster-service: "true" 156 | spec: 157 | selector: 158 | k8s-app: metrics-server 159 | ports: 160 | - port: 443 161 | protocol: TCP 162 | targetPort: main-port 163 | -------------------------------------------------------------------------------- /plugins/ingress-nginx-v0.29.0.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress-nginx 5 | labels: 6 | app.kubernetes.io/name: ingress-nginx 7 | app.kubernetes.io/part-of: ingress-nginx 8 | 9 | --- 10 | 11 | kind: ConfigMap 12 | apiVersion: v1 13 | metadata: 14 | name: nginx-configuration 15 | namespace: ingress-nginx 16 | labels: 17 | app.kubernetes.io/name: ingress-nginx 18 | app.kubernetes.io/part-of: ingress-nginx 19 | 20 | --- 21 | kind: ConfigMap 22 | apiVersion: v1 23 | metadata: 24 | name: tcp-services 25 | namespace: ingress-nginx 26 | labels: 27 | app.kubernetes.io/name: ingress-nginx 28 | app.kubernetes.io/part-of: ingress-nginx 29 | 30 | --- 31 | kind: ConfigMap 32 | apiVersion: v1 33 | metadata: 34 | name: udp-services 35 | namespace: ingress-nginx 36 | labels: 37 | app.kubernetes.io/name: ingress-nginx 38 | app.kubernetes.io/part-of: ingress-nginx 39 | 40 | --- 41 | apiVersion: v1 42 | kind: ServiceAccount 43 | metadata: 44 | name: nginx-ingress-serviceaccount 45 | namespace: ingress-nginx 46 | labels: 47 | app.kubernetes.io/name: ingress-nginx 48 | app.kubernetes.io/part-of: ingress-nginx 49 | 50 | --- 51 | apiVersion: rbac.authorization.k8s.io/v1beta1 52 | kind: ClusterRole 53 | metadata: 54 | name: nginx-ingress-clusterrole 55 | labels: 56 | app.kubernetes.io/name: ingress-nginx 57 | app.kubernetes.io/part-of: ingress-nginx 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - configmaps 63 | - endpoints 64 | - nodes 65 | - pods 66 | - secrets 67 | verbs: 68 | - list 69 | - watch 70 | - apiGroups: 71 | - "" 72 | resources: 73 | - nodes 74 | verbs: 75 | - get 76 | - apiGroups: 77 | - "" 78 | resources: 79 | - services 80 | verbs: 81 | - get 82 | - list 83 | - watch 84 | - apiGroups: 85 | - "" 86 | resources: 87 | - events 88 | verbs: 89 | - create 90 | - patch 91 | - apiGroups: 92 | - "extensions" 93 | - "networking.k8s.io" 94 | resources: 95 | - ingresses 96 | verbs: 97 | - get 98 | - list 99 | - watch 100 | - apiGroups: 101 | - "extensions" 102 | - "networking.k8s.io" 103 | resources: 104 | - 
ingresses/status 105 | verbs: 106 | - update 107 | 108 | --- 109 | apiVersion: rbac.authorization.k8s.io/v1beta1 110 | kind: Role 111 | metadata: 112 | name: nginx-ingress-role 113 | namespace: ingress-nginx 114 | labels: 115 | app.kubernetes.io/name: ingress-nginx 116 | app.kubernetes.io/part-of: ingress-nginx 117 | rules: 118 | - apiGroups: 119 | - "" 120 | resources: 121 | - configmaps 122 | - pods 123 | - secrets 124 | - namespaces 125 | verbs: 126 | - get 127 | - apiGroups: 128 | - "" 129 | resources: 130 | - configmaps 131 | resourceNames: 132 | # Defaults to "-" 133 | # Here: "-" 134 | # This has to be adapted if you change either parameter 135 | # when launching the nginx-ingress-controller. 136 | - "ingress-controller-leader-nginx" 137 | verbs: 138 | - get 139 | - update 140 | - apiGroups: 141 | - "" 142 | resources: 143 | - configmaps 144 | verbs: 145 | - create 146 | - apiGroups: 147 | - "" 148 | resources: 149 | - endpoints 150 | verbs: 151 | - get 152 | 153 | --- 154 | apiVersion: rbac.authorization.k8s.io/v1beta1 155 | kind: RoleBinding 156 | metadata: 157 | name: nginx-ingress-role-nisa-binding 158 | namespace: ingress-nginx 159 | labels: 160 | app.kubernetes.io/name: ingress-nginx 161 | app.kubernetes.io/part-of: ingress-nginx 162 | roleRef: 163 | apiGroup: rbac.authorization.k8s.io 164 | kind: Role 165 | name: nginx-ingress-role 166 | subjects: 167 | - kind: ServiceAccount 168 | name: nginx-ingress-serviceaccount 169 | namespace: ingress-nginx 170 | 171 | --- 172 | apiVersion: rbac.authorization.k8s.io/v1beta1 173 | kind: ClusterRoleBinding 174 | metadata: 175 | name: nginx-ingress-clusterrole-nisa-binding 176 | labels: 177 | app.kubernetes.io/name: ingress-nginx 178 | app.kubernetes.io/part-of: ingress-nginx 179 | roleRef: 180 | apiGroup: rbac.authorization.k8s.io 181 | kind: ClusterRole 182 | name: nginx-ingress-clusterrole 183 | subjects: 184 | - kind: ServiceAccount 185 | name: nginx-ingress-serviceaccount 186 | namespace: ingress-nginx 187 | 188 | --- 189 | 190 | apiVersion: apps/v1 191 | kind: Deployment 192 | metadata: 193 | name: nginx-ingress-controller 194 | namespace: ingress-nginx 195 | labels: 196 | app.kubernetes.io/name: ingress-nginx 197 | app.kubernetes.io/part-of: ingress-nginx 198 | spec: 199 | replicas: 1 200 | selector: 201 | matchLabels: 202 | app.kubernetes.io/name: ingress-nginx 203 | app.kubernetes.io/part-of: ingress-nginx 204 | template: 205 | metadata: 206 | labels: 207 | app.kubernetes.io/name: ingress-nginx 208 | app.kubernetes.io/part-of: ingress-nginx 209 | annotations: 210 | prometheus.io/port: "10254" 211 | prometheus.io/scrape: "true" 212 | spec: 213 | # wait up to five minutes for the drain of connections 214 | terminationGracePeriodSeconds: 300 215 | serviceAccountName: nginx-ingress-serviceaccount 216 | nodeSelector: 217 | kubernetes.io/os: linux 218 | containers: 219 | - name: nginx-ingress-controller 220 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.29.0 221 | args: 222 | - /nginx-ingress-controller 223 | - --configmap=$(POD_NAMESPACE)/nginx-configuration 224 | - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services 225 | - --udp-services-configmap=$(POD_NAMESPACE)/udp-services 226 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx 227 | - --annotations-prefix=nginx.ingress.kubernetes.io 228 | securityContext: 229 | allowPrivilegeEscalation: true 230 | capabilities: 231 | drop: 232 | - ALL 233 | add: 234 | - NET_BIND_SERVICE 235 | # www-data -> 101 236 | runAsUser: 101 237 | env: 238 | - name: POD_NAME 
239 | valueFrom: 240 | fieldRef: 241 | fieldPath: metadata.name 242 | - name: POD_NAMESPACE 243 | valueFrom: 244 | fieldRef: 245 | fieldPath: metadata.namespace 246 | ports: 247 | - name: http 248 | containerPort: 80 249 | protocol: TCP 250 | - name: https 251 | containerPort: 443 252 | protocol: TCP 253 | livenessProbe: 254 | failureThreshold: 3 255 | httpGet: 256 | path: /healthz 257 | port: 10254 258 | scheme: HTTP 259 | initialDelaySeconds: 10 260 | periodSeconds: 10 261 | successThreshold: 1 262 | timeoutSeconds: 10 263 | readinessProbe: 264 | failureThreshold: 3 265 | httpGet: 266 | path: /healthz 267 | port: 10254 268 | scheme: HTTP 269 | periodSeconds: 10 270 | successThreshold: 1 271 | timeoutSeconds: 10 272 | lifecycle: 273 | preStop: 274 | exec: 275 | command: 276 | - /wait-shutdown 277 | 278 | --- 279 | 280 | apiVersion: v1 281 | kind: LimitRange 282 | metadata: 283 | name: ingress-nginx 284 | namespace: ingress-nginx 285 | labels: 286 | app.kubernetes.io/name: ingress-nginx 287 | app.kubernetes.io/part-of: ingress-nginx 288 | spec: 289 | limits: 290 | - min: 291 | memory: 90Mi 292 | cpu: 100m 293 | type: Container 294 | -------------------------------------------------------------------------------- /plugins/dashboard-v2.0.0-rc5.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | apiVersion: v1 16 | kind: Namespace 17 | metadata: 18 | name: kubernetes-dashboard 19 | 20 | --- 21 | 22 | apiVersion: v1 23 | kind: ServiceAccount 24 | metadata: 25 | labels: 26 | k8s-app: kubernetes-dashboard 27 | name: kubernetes-dashboard 28 | namespace: kubernetes-dashboard 29 | 30 | --- 31 | 32 | kind: Service 33 | apiVersion: v1 34 | metadata: 35 | labels: 36 | k8s-app: kubernetes-dashboard 37 | name: kubernetes-dashboard 38 | namespace: kubernetes-dashboard 39 | spec: 40 | ports: 41 | - port: 443 42 | targetPort: 8443 43 | selector: 44 | k8s-app: kubernetes-dashboard 45 | 46 | --- 47 | 48 | apiVersion: v1 49 | kind: Secret 50 | metadata: 51 | labels: 52 | k8s-app: kubernetes-dashboard 53 | name: kubernetes-dashboard-certs 54 | namespace: kubernetes-dashboard 55 | type: Opaque 56 | 57 | --- 58 | 59 | apiVersion: v1 60 | kind: Secret 61 | metadata: 62 | labels: 63 | k8s-app: kubernetes-dashboard 64 | name: kubernetes-dashboard-csrf 65 | namespace: kubernetes-dashboard 66 | type: Opaque 67 | data: 68 | csrf: "" 69 | 70 | --- 71 | 72 | apiVersion: v1 73 | kind: Secret 74 | metadata: 75 | labels: 76 | k8s-app: kubernetes-dashboard 77 | name: kubernetes-dashboard-key-holder 78 | namespace: kubernetes-dashboard 79 | type: Opaque 80 | 81 | --- 82 | 83 | kind: ConfigMap 84 | apiVersion: v1 85 | metadata: 86 | labels: 87 | k8s-app: kubernetes-dashboard 88 | name: kubernetes-dashboard-settings 89 | namespace: kubernetes-dashboard 90 | 91 | --- 92 | 93 | kind: Role 94 | apiVersion: rbac.authorization.k8s.io/v1 95 | metadata: 96 | labels: 97 | k8s-app: kubernetes-dashboard 98 | name: kubernetes-dashboard 99 | namespace: kubernetes-dashboard 100 | rules: 101 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 102 | - apiGroups: [""] 103 | resources: ["secrets"] 104 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] 105 | verbs: ["get", "update", "delete"] 106 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 107 | - apiGroups: [""] 108 | resources: ["configmaps"] 109 | resourceNames: ["kubernetes-dashboard-settings"] 110 | verbs: ["get", "update"] 111 | # Allow Dashboard to get metrics. 
112 | - apiGroups: [""] 113 | resources: ["services"] 114 | resourceNames: ["heapster", "dashboard-metrics-scraper"] 115 | verbs: ["proxy"] 116 | - apiGroups: [""] 117 | resources: ["services/proxy"] 118 | resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] 119 | verbs: ["get"] 120 | 121 | --- 122 | 123 | kind: ClusterRole 124 | apiVersion: rbac.authorization.k8s.io/v1 125 | metadata: 126 | labels: 127 | k8s-app: kubernetes-dashboard 128 | name: kubernetes-dashboard 129 | rules: 130 | # Allow Metrics Scraper to get metrics from the Metrics server 131 | - apiGroups: ["metrics.k8s.io"] 132 | resources: ["pods", "nodes"] 133 | verbs: ["get", "list", "watch"] 134 | 135 | --- 136 | 137 | apiVersion: rbac.authorization.k8s.io/v1 138 | kind: RoleBinding 139 | metadata: 140 | labels: 141 | k8s-app: kubernetes-dashboard 142 | name: kubernetes-dashboard 143 | namespace: kubernetes-dashboard 144 | roleRef: 145 | apiGroup: rbac.authorization.k8s.io 146 | kind: Role 147 | name: kubernetes-dashboard 148 | subjects: 149 | - kind: ServiceAccount 150 | name: kubernetes-dashboard 151 | namespace: kubernetes-dashboard 152 | 153 | --- 154 | 155 | apiVersion: rbac.authorization.k8s.io/v1 156 | kind: ClusterRoleBinding 157 | metadata: 158 | name: kubernetes-dashboard 159 | roleRef: 160 | apiGroup: rbac.authorization.k8s.io 161 | kind: ClusterRole 162 | name: kubernetes-dashboard 163 | subjects: 164 | - kind: ServiceAccount 165 | name: kubernetes-dashboard 166 | namespace: kubernetes-dashboard 167 | 168 | --- 169 | 170 | kind: Deployment 171 | apiVersion: apps/v1 172 | metadata: 173 | labels: 174 | k8s-app: kubernetes-dashboard 175 | name: kubernetes-dashboard 176 | namespace: kubernetes-dashboard 177 | spec: 178 | replicas: 1 179 | revisionHistoryLimit: 10 180 | selector: 181 | matchLabels: 182 | k8s-app: kubernetes-dashboard 183 | template: 184 | metadata: 185 | labels: 186 | k8s-app: kubernetes-dashboard 187 | spec: 188 | containers: 189 | - name: kubernetes-dashboard 190 | image: kubernetesui/dashboard:v2.0.0-rc5 191 | # imagePullPolicy: Always 192 | ports: 193 | - containerPort: 8443 194 | protocol: TCP 195 | args: 196 | - --auto-generate-certificates 197 | - --namespace=kubernetes-dashboard 198 | # Uncomment the following line to manually specify Kubernetes API server Host 199 | # If not specified, Dashboard will attempt to auto discover the API server and connect 200 | # to it. Uncomment only if the default does not work. 
201 | # - --apiserver-host=http://my-address:port 202 | volumeMounts: 203 | - name: kubernetes-dashboard-certs 204 | mountPath: /certs 205 | # Create on-disk volume to store exec logs 206 | - mountPath: /tmp 207 | name: tmp-volume 208 | livenessProbe: 209 | httpGet: 210 | scheme: HTTPS 211 | path: / 212 | port: 8443 213 | initialDelaySeconds: 30 214 | timeoutSeconds: 30 215 | securityContext: 216 | allowPrivilegeEscalation: false 217 | readOnlyRootFilesystem: true 218 | runAsUser: 1001 219 | runAsGroup: 2001 220 | volumes: 221 | - name: kubernetes-dashboard-certs 222 | secret: 223 | secretName: kubernetes-dashboard-certs 224 | - name: tmp-volume 225 | emptyDir: {} 226 | serviceAccountName: kubernetes-dashboard 227 | nodeSelector: 228 | "beta.kubernetes.io/os": linux 229 | # Comment the following tolerations if Dashboard must not be deployed on master 230 | tolerations: 231 | - key: node-role.kubernetes.io/master 232 | effect: NoSchedule 233 | 234 | --- 235 | 236 | kind: Service 237 | apiVersion: v1 238 | metadata: 239 | labels: 240 | k8s-app: dashboard-metrics-scraper 241 | name: dashboard-metrics-scraper 242 | namespace: kubernetes-dashboard 243 | spec: 244 | ports: 245 | - port: 8000 246 | targetPort: 8000 247 | selector: 248 | k8s-app: dashboard-metrics-scraper 249 | 250 | --- 251 | 252 | kind: Deployment 253 | apiVersion: apps/v1 254 | metadata: 255 | labels: 256 | k8s-app: dashboard-metrics-scraper 257 | name: dashboard-metrics-scraper 258 | namespace: kubernetes-dashboard 259 | spec: 260 | replicas: 1 261 | revisionHistoryLimit: 10 262 | selector: 263 | matchLabels: 264 | k8s-app: dashboard-metrics-scraper 265 | template: 266 | metadata: 267 | labels: 268 | k8s-app: dashboard-metrics-scraper 269 | annotations: 270 | seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' 271 | spec: 272 | containers: 273 | - name: dashboard-metrics-scraper 274 | image: kubernetesui/metrics-scraper:v1.0.3 275 | ports: 276 | - containerPort: 8000 277 | protocol: TCP 278 | livenessProbe: 279 | httpGet: 280 | scheme: HTTP 281 | path: / 282 | port: 8000 283 | initialDelaySeconds: 30 284 | timeoutSeconds: 30 285 | volumeMounts: 286 | - mountPath: /tmp 287 | name: tmp-volume 288 | securityContext: 289 | allowPrivilegeEscalation: false 290 | readOnlyRootFilesystem: true 291 | runAsUser: 1001 292 | runAsGroup: 2001 293 | serviceAccountName: kubernetes-dashboard 294 | nodeSelector: 295 | "beta.kubernetes.io/os": linux 296 | # Comment the following tolerations if Dashboard must not be deployed on master 297 | tolerations: 298 | - key: node-role.kubernetes.io/master 299 | effect: NoSchedule 300 | volumes: 301 | - name: tmp-volume 302 | emptyDir: {} 303 | -------------------------------------------------------------------------------- /scripts/calico.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-config.yaml 3 | # This ConfigMap is used to configure a self-hosted Calico installation. 4 | kind: ConfigMap 5 | apiVersion: v1 6 | metadata: 7 | name: calico-config 8 | namespace: kube-system 9 | data: 10 | # Typha is disabled. 11 | typha_service_name: "none" 12 | # Configure the backend to use. 13 | calico_backend: "bird" 14 | 15 | # Configure the MTU to use 16 | veth_mtu: "1440" 17 | 18 | # The CNI network configuration to install on each node. The special 19 | # values in this config will be automatically populated. 
20 | cni_network_config: |- 21 | { 22 | "name": "k8s-pod-network", 23 | "cniVersion": "0.3.1", 24 | "plugins": [ 25 | { 26 | "type": "calico", 27 | "log_level": "info", 28 | "datastore_type": "kubernetes", 29 | "nodename": "__KUBERNETES_NODE_NAME__", 30 | "mtu": __CNI_MTU__, 31 | "ipam": { 32 | "type": "calico-ipam" 33 | }, 34 | "policy": { 35 | "type": "k8s" 36 | }, 37 | "kubernetes": { 38 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 39 | } 40 | }, 41 | { 42 | "type": "portmap", 43 | "snat": true, 44 | "capabilities": {"portMappings": true} 45 | } 46 | ] 47 | } 48 | 49 | --- 50 | # Source: calico/templates/kdd-crds.yaml 51 | apiVersion: apiextensions.k8s.io/v1beta1 52 | kind: CustomResourceDefinition 53 | metadata: 54 | name: felixconfigurations.crd.projectcalico.org 55 | spec: 56 | scope: Cluster 57 | group: crd.projectcalico.org 58 | version: v1 59 | names: 60 | kind: FelixConfiguration 61 | plural: felixconfigurations 62 | singular: felixconfiguration 63 | --- 64 | 65 | apiVersion: apiextensions.k8s.io/v1beta1 66 | kind: CustomResourceDefinition 67 | metadata: 68 | name: ipamblocks.crd.projectcalico.org 69 | spec: 70 | scope: Cluster 71 | group: crd.projectcalico.org 72 | version: v1 73 | names: 74 | kind: IPAMBlock 75 | plural: ipamblocks 76 | singular: ipamblock 77 | 78 | --- 79 | 80 | apiVersion: apiextensions.k8s.io/v1beta1 81 | kind: CustomResourceDefinition 82 | metadata: 83 | name: blockaffinities.crd.projectcalico.org 84 | spec: 85 | scope: Cluster 86 | group: crd.projectcalico.org 87 | version: v1 88 | names: 89 | kind: BlockAffinity 90 | plural: blockaffinities 91 | singular: blockaffinity 92 | 93 | --- 94 | 95 | apiVersion: apiextensions.k8s.io/v1beta1 96 | kind: CustomResourceDefinition 97 | metadata: 98 | name: ipamhandles.crd.projectcalico.org 99 | spec: 100 | scope: Cluster 101 | group: crd.projectcalico.org 102 | version: v1 103 | names: 104 | kind: IPAMHandle 105 | plural: ipamhandles 106 | singular: ipamhandle 107 | 108 | --- 109 | 110 | apiVersion: apiextensions.k8s.io/v1beta1 111 | kind: CustomResourceDefinition 112 | metadata: 113 | name: ipamconfigs.crd.projectcalico.org 114 | spec: 115 | scope: Cluster 116 | group: crd.projectcalico.org 117 | version: v1 118 | names: 119 | kind: IPAMConfig 120 | plural: ipamconfigs 121 | singular: ipamconfig 122 | 123 | --- 124 | 125 | apiVersion: apiextensions.k8s.io/v1beta1 126 | kind: CustomResourceDefinition 127 | metadata: 128 | name: bgppeers.crd.projectcalico.org 129 | spec: 130 | scope: Cluster 131 | group: crd.projectcalico.org 132 | version: v1 133 | names: 134 | kind: BGPPeer 135 | plural: bgppeers 136 | singular: bgppeer 137 | 138 | --- 139 | 140 | apiVersion: apiextensions.k8s.io/v1beta1 141 | kind: CustomResourceDefinition 142 | metadata: 143 | name: bgpconfigurations.crd.projectcalico.org 144 | spec: 145 | scope: Cluster 146 | group: crd.projectcalico.org 147 | version: v1 148 | names: 149 | kind: BGPConfiguration 150 | plural: bgpconfigurations 151 | singular: bgpconfiguration 152 | 153 | --- 154 | 155 | apiVersion: apiextensions.k8s.io/v1beta1 156 | kind: CustomResourceDefinition 157 | metadata: 158 | name: ippools.crd.projectcalico.org 159 | spec: 160 | scope: Cluster 161 | group: crd.projectcalico.org 162 | version: v1 163 | names: 164 | kind: IPPool 165 | plural: ippools 166 | singular: ippool 167 | 168 | --- 169 | 170 | apiVersion: apiextensions.k8s.io/v1beta1 171 | kind: CustomResourceDefinition 172 | metadata: 173 | name: hostendpoints.crd.projectcalico.org 174 | spec: 175 | scope: Cluster 176 | group: 
crd.projectcalico.org 177 | version: v1 178 | names: 179 | kind: HostEndpoint 180 | plural: hostendpoints 181 | singular: hostendpoint 182 | 183 | --- 184 | 185 | apiVersion: apiextensions.k8s.io/v1beta1 186 | kind: CustomResourceDefinition 187 | metadata: 188 | name: clusterinformations.crd.projectcalico.org 189 | spec: 190 | scope: Cluster 191 | group: crd.projectcalico.org 192 | version: v1 193 | names: 194 | kind: ClusterInformation 195 | plural: clusterinformations 196 | singular: clusterinformation 197 | 198 | --- 199 | 200 | apiVersion: apiextensions.k8s.io/v1beta1 201 | kind: CustomResourceDefinition 202 | metadata: 203 | name: globalnetworkpolicies.crd.projectcalico.org 204 | spec: 205 | scope: Cluster 206 | group: crd.projectcalico.org 207 | version: v1 208 | names: 209 | kind: GlobalNetworkPolicy 210 | plural: globalnetworkpolicies 211 | singular: globalnetworkpolicy 212 | 213 | --- 214 | 215 | apiVersion: apiextensions.k8s.io/v1beta1 216 | kind: CustomResourceDefinition 217 | metadata: 218 | name: globalnetworksets.crd.projectcalico.org 219 | spec: 220 | scope: Cluster 221 | group: crd.projectcalico.org 222 | version: v1 223 | names: 224 | kind: GlobalNetworkSet 225 | plural: globalnetworksets 226 | singular: globalnetworkset 227 | 228 | --- 229 | 230 | apiVersion: apiextensions.k8s.io/v1beta1 231 | kind: CustomResourceDefinition 232 | metadata: 233 | name: networkpolicies.crd.projectcalico.org 234 | spec: 235 | scope: Namespaced 236 | group: crd.projectcalico.org 237 | version: v1 238 | names: 239 | kind: NetworkPolicy 240 | plural: networkpolicies 241 | singular: networkpolicy 242 | 243 | --- 244 | 245 | apiVersion: apiextensions.k8s.io/v1beta1 246 | kind: CustomResourceDefinition 247 | metadata: 248 | name: networksets.crd.projectcalico.org 249 | spec: 250 | scope: Namespaced 251 | group: crd.projectcalico.org 252 | version: v1 253 | names: 254 | kind: NetworkSet 255 | plural: networksets 256 | singular: networkset 257 | --- 258 | # Source: calico/templates/rbac.yaml 259 | 260 | # Include a clusterrole for the kube-controllers component, 261 | # and bind it to the calico-kube-controllers serviceaccount. 262 | kind: ClusterRole 263 | apiVersion: rbac.authorization.k8s.io/v1 264 | metadata: 265 | name: calico-kube-controllers 266 | rules: 267 | # Nodes are watched to monitor for deletions. 268 | - apiGroups: [""] 269 | resources: 270 | - nodes 271 | verbs: 272 | - watch 273 | - list 274 | - get 275 | # Pods are queried to check for existence. 276 | - apiGroups: [""] 277 | resources: 278 | - pods 279 | verbs: 280 | - get 281 | # IPAM resources are manipulated when nodes are deleted. 282 | - apiGroups: ["crd.projectcalico.org"] 283 | resources: 284 | - ippools 285 | verbs: 286 | - list 287 | - apiGroups: ["crd.projectcalico.org"] 288 | resources: 289 | - blockaffinities 290 | - ipamblocks 291 | - ipamhandles 292 | verbs: 293 | - get 294 | - list 295 | - create 296 | - update 297 | - delete 298 | # Needs access to update clusterinformations. 
299 | - apiGroups: ["crd.projectcalico.org"] 300 | resources: 301 | - clusterinformations 302 | verbs: 303 | - get 304 | - create 305 | - update 306 | --- 307 | kind: ClusterRoleBinding 308 | apiVersion: rbac.authorization.k8s.io/v1 309 | metadata: 310 | name: calico-kube-controllers 311 | roleRef: 312 | apiGroup: rbac.authorization.k8s.io 313 | kind: ClusterRole 314 | name: calico-kube-controllers 315 | subjects: 316 | - kind: ServiceAccount 317 | name: calico-kube-controllers 318 | namespace: kube-system 319 | --- 320 | # Include a clusterrole for the calico-node DaemonSet, 321 | # and bind it to the calico-node serviceaccount. 322 | kind: ClusterRole 323 | apiVersion: rbac.authorization.k8s.io/v1 324 | metadata: 325 | name: calico-node 326 | rules: 327 | # The CNI plugin needs to get pods, nodes, and namespaces. 328 | - apiGroups: [""] 329 | resources: 330 | - pods 331 | - nodes 332 | - namespaces 333 | verbs: 334 | - get 335 | - apiGroups: [""] 336 | resources: 337 | - endpoints 338 | - services 339 | verbs: 340 | # Used to discover service IPs for advertisement. 341 | - watch 342 | - list 343 | # Used to discover Typhas. 344 | - get 345 | - apiGroups: [""] 346 | resources: 347 | - nodes/status 348 | verbs: 349 | # Needed for clearing NodeNetworkUnavailable flag. 350 | - patch 351 | # Calico stores some configuration information in node annotations. 352 | - update 353 | # Watch for changes to Kubernetes NetworkPolicies. 354 | - apiGroups: ["networking.k8s.io"] 355 | resources: 356 | - networkpolicies 357 | verbs: 358 | - watch 359 | - list 360 | # Used by Calico for policy information. 361 | - apiGroups: [""] 362 | resources: 363 | - pods 364 | - namespaces 365 | - serviceaccounts 366 | verbs: 367 | - list 368 | - watch 369 | # The CNI plugin patches pods/status. 370 | - apiGroups: [""] 371 | resources: 372 | - pods/status 373 | verbs: 374 | - patch 375 | # Calico monitors various CRDs for config. 376 | - apiGroups: ["crd.projectcalico.org"] 377 | resources: 378 | - globalfelixconfigs 379 | - felixconfigurations 380 | - bgppeers 381 | - globalbgpconfigs 382 | - bgpconfigurations 383 | - ippools 384 | - ipamblocks 385 | - globalnetworkpolicies 386 | - globalnetworksets 387 | - networkpolicies 388 | - networksets 389 | - clusterinformations 390 | - hostendpoints 391 | - blockaffinities 392 | verbs: 393 | - get 394 | - list 395 | - watch 396 | # Calico must create and update some CRDs on startup. 397 | - apiGroups: ["crd.projectcalico.org"] 398 | resources: 399 | - ippools 400 | - felixconfigurations 401 | - clusterinformations 402 | verbs: 403 | - create 404 | - update 405 | # Calico stores some configuration information on the node. 406 | - apiGroups: [""] 407 | resources: 408 | - nodes 409 | verbs: 410 | - get 411 | - list 412 | - watch 413 | # These permissions are only requried for upgrade from v2.6, and can 414 | # be removed after upgrade or on fresh installations. 415 | - apiGroups: ["crd.projectcalico.org"] 416 | resources: 417 | - bgpconfigurations 418 | - bgppeers 419 | verbs: 420 | - create 421 | - update 422 | # These permissions are required for Calico CNI to perform IPAM allocations. 423 | - apiGroups: ["crd.projectcalico.org"] 424 | resources: 425 | - blockaffinities 426 | - ipamblocks 427 | - ipamhandles 428 | verbs: 429 | - get 430 | - list 431 | - create 432 | - update 433 | - delete 434 | - apiGroups: ["crd.projectcalico.org"] 435 | resources: 436 | - ipamconfigs 437 | verbs: 438 | - get 439 | # Block affinities must also be watchable by confd for route aggregation. 
440 | - apiGroups: ["crd.projectcalico.org"] 441 | resources: 442 | - blockaffinities 443 | verbs: 444 | - watch 445 | # The Calico IPAM migration needs to get daemonsets. These permissions can be 446 | # removed if not upgrading from an installation using host-local IPAM. 447 | - apiGroups: ["apps"] 448 | resources: 449 | - daemonsets 450 | verbs: 451 | - get 452 | --- 453 | apiVersion: rbac.authorization.k8s.io/v1 454 | kind: ClusterRoleBinding 455 | metadata: 456 | name: calico-node 457 | roleRef: 458 | apiGroup: rbac.authorization.k8s.io 459 | kind: ClusterRole 460 | name: calico-node 461 | subjects: 462 | - kind: ServiceAccount 463 | name: calico-node 464 | namespace: kube-system 465 | 466 | --- 467 | # Source: calico/templates/calico-node.yaml 468 | # This manifest installs the calico-node container, as well 469 | # as the CNI plugins and network config on 470 | # each master and worker node in a Kubernetes cluster. 471 | kind: DaemonSet 472 | apiVersion: apps/v1 473 | metadata: 474 | name: calico-node 475 | namespace: kube-system 476 | labels: 477 | k8s-app: calico-node 478 | spec: 479 | selector: 480 | matchLabels: 481 | k8s-app: calico-node 482 | updateStrategy: 483 | type: RollingUpdate 484 | rollingUpdate: 485 | maxUnavailable: 1 486 | template: 487 | metadata: 488 | labels: 489 | k8s-app: calico-node 490 | annotations: 491 | # This, along with the CriticalAddonsOnly toleration below, 492 | # marks the pod as a critical add-on, ensuring it gets 493 | # priority scheduling and that its resources are reserved 494 | # if it ever gets evicted. 495 | scheduler.alpha.kubernetes.io/critical-pod: '' 496 | spec: 497 | nodeSelector: 498 | beta.kubernetes.io/os: linux 499 | hostNetwork: true 500 | tolerations: 501 | # Make sure calico-node gets scheduled on all nodes. 502 | - effect: NoSchedule 503 | operator: Exists 504 | # Mark the pod as a critical add-on for rescheduling. 505 | - key: CriticalAddonsOnly 506 | operator: Exists 507 | - effect: NoExecute 508 | operator: Exists 509 | serviceAccountName: calico-node 510 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 511 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 512 | terminationGracePeriodSeconds: 0 513 | priorityClassName: system-node-critical 514 | initContainers: 515 | # This container performs upgrade from host-local IPAM to calico-ipam. 516 | # It can be deleted if this is a fresh installation, or if you have already 517 | # upgraded to use calico-ipam. 518 | - name: upgrade-ipam 519 | image: calico/cni:v3.10.3 520 | command: ["/opt/cni/bin/calico-ipam", "-upgrade"] 521 | env: 522 | - name: KUBERNETES_NODE_NAME 523 | valueFrom: 524 | fieldRef: 525 | fieldPath: spec.nodeName 526 | - name: CALICO_NETWORKING_BACKEND 527 | valueFrom: 528 | configMapKeyRef: 529 | name: calico-config 530 | key: calico_backend 531 | volumeMounts: 532 | - mountPath: /var/lib/cni/networks 533 | name: host-local-net-dir 534 | - mountPath: /host/opt/cni/bin 535 | name: cni-bin-dir 536 | # This container installs the CNI binaries 537 | # and CNI network config file on each node. 538 | - name: install-cni 539 | image: calico/cni:v3.10.3 540 | command: ["/install-cni.sh"] 541 | env: 542 | # Name of the CNI config file to create. 543 | - name: CNI_CONF_NAME 544 | value: "10-calico.conflist" 545 | # The CNI network config to install on each node. 
546 | - name: CNI_NETWORK_CONFIG 547 | valueFrom: 548 | configMapKeyRef: 549 | name: calico-config 550 | key: cni_network_config 551 | # Set the hostname based on the k8s node name. 552 | - name: KUBERNETES_NODE_NAME 553 | valueFrom: 554 | fieldRef: 555 | fieldPath: spec.nodeName 556 | # CNI MTU Config variable 557 | - name: CNI_MTU 558 | valueFrom: 559 | configMapKeyRef: 560 | name: calico-config 561 | key: veth_mtu 562 | # Prevents the container from sleeping forever. 563 | - name: SLEEP 564 | value: "false" 565 | volumeMounts: 566 | - mountPath: /host/opt/cni/bin 567 | name: cni-bin-dir 568 | - mountPath: /host/etc/cni/net.d 569 | name: cni-net-dir 570 | # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes 571 | # to communicate with Felix over the Policy Sync API. 572 | - name: flexvol-driver 573 | image: calico/pod2daemon-flexvol:v3.10.3 574 | volumeMounts: 575 | - name: flexvol-driver-host 576 | mountPath: /host/driver 577 | containers: 578 | # Runs calico-node container on each Kubernetes node. This 579 | # container programs network policy and routes on each 580 | # host. 581 | - name: calico-node 582 | image: calico/node:v3.10.3 583 | env: 584 | # Use Kubernetes API as the backing datastore. 585 | - name: DATASTORE_TYPE 586 | value: "kubernetes" 587 | # Wait for the datastore. 588 | - name: WAIT_FOR_DATASTORE 589 | value: "true" 590 | # Set based on the k8s node name. 591 | - name: NODENAME 592 | valueFrom: 593 | fieldRef: 594 | fieldPath: spec.nodeName 595 | # Choose the backend to use. 596 | - name: CALICO_NETWORKING_BACKEND 597 | valueFrom: 598 | configMapKeyRef: 599 | name: calico-config 600 | key: calico_backend 601 | # Cluster type to identify the deployment type 602 | - name: CLUSTER_TYPE 603 | value: "k8s,bgp" 604 | # Auto-detect the BGP IP address. 605 | - name: IP 606 | value: "autodetect" 607 | # Enable IPIP 608 | - name: CALICO_IPV4POOL_IPIP 609 | value: "Always" 610 | # Set MTU for tunnel device used if ipip is enabled 611 | - name: FELIX_IPINIPMTU 612 | valueFrom: 613 | configMapKeyRef: 614 | name: calico-config 615 | key: veth_mtu 616 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 617 | # chosen from this range. Changing this value after installation will have 618 | # no effect. This should fall within `--cluster-cidr`. 619 | - name: CALICO_IPV4POOL_CIDR 620 | value: "10.11.10.0/16" 621 | # Disable file logging so `kubectl logs` works. 622 | - name: CALICO_DISABLE_FILE_LOGGING 623 | value: "true" 624 | # Set Felix endpoint to host default action to ACCEPT. 625 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 626 | value: "ACCEPT" 627 | # Disable IPv6 on Kubernetes. 
628 | - name: FELIX_IPV6SUPPORT 629 | value: "false" 630 | # Set Felix logging to "info" 631 | - name: FELIX_LOGSEVERITYSCREEN 632 | value: "info" 633 | - name: FELIX_HEALTHENABLED 634 | value: "true" 635 | securityContext: 636 | privileged: true 637 | resources: 638 | requests: 639 | cpu: 250m 640 | livenessProbe: 641 | exec: 642 | command: 643 | - /bin/calico-node 644 | - -felix-live 645 | - -bird-live 646 | periodSeconds: 10 647 | initialDelaySeconds: 10 648 | failureThreshold: 6 649 | readinessProbe: 650 | exec: 651 | command: 652 | - /bin/calico-node 653 | - -felix-ready 654 | - -bird-ready 655 | periodSeconds: 10 656 | volumeMounts: 657 | - mountPath: /lib/modules 658 | name: lib-modules 659 | readOnly: true 660 | - mountPath: /run/xtables.lock 661 | name: xtables-lock 662 | readOnly: false 663 | - mountPath: /var/run/calico 664 | name: var-run-calico 665 | readOnly: false 666 | - mountPath: /var/lib/calico 667 | name: var-lib-calico 668 | readOnly: false 669 | - name: policysync 670 | mountPath: /var/run/nodeagent 671 | volumes: 672 | # Used by calico-node. 673 | - name: lib-modules 674 | hostPath: 675 | path: /lib/modules 676 | - name: var-run-calico 677 | hostPath: 678 | path: /var/run/calico 679 | - name: var-lib-calico 680 | hostPath: 681 | path: /var/lib/calico 682 | - name: xtables-lock 683 | hostPath: 684 | path: /run/xtables.lock 685 | type: FileOrCreate 686 | # Used to install CNI. 687 | - name: cni-bin-dir 688 | hostPath: 689 | path: /opt/cni/bin 690 | - name: cni-net-dir 691 | hostPath: 692 | path: /etc/cni/net.d 693 | # Mount in the directory for host-local IPAM allocations. This is 694 | # used when upgrading from host-local to calico-ipam, and can be removed 695 | # if not using the upgrade-ipam init container. 696 | - name: host-local-net-dir 697 | hostPath: 698 | path: /var/lib/cni/networks 699 | # Used to create per-pod Unix Domain Sockets 700 | - name: policysync 701 | hostPath: 702 | type: DirectoryOrCreate 703 | path: /var/run/nodeagent 704 | # Used to install Flex Volume Driver 705 | - name: flexvol-driver-host 706 | hostPath: 707 | type: DirectoryOrCreate 708 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds 709 | --- 710 | 711 | apiVersion: v1 712 | kind: ServiceAccount 713 | metadata: 714 | name: calico-node 715 | namespace: kube-system 716 | 717 | --- 718 | # Source: calico/templates/calico-kube-controllers.yaml 719 | 720 | # See https://github.com/projectcalico/kube-controllers 721 | apiVersion: apps/v1 722 | kind: Deployment 723 | metadata: 724 | name: calico-kube-controllers 725 | namespace: kube-system 726 | labels: 727 | k8s-app: calico-kube-controllers 728 | spec: 729 | # The controllers can only have a single active instance. 730 | replicas: 1 731 | selector: 732 | matchLabels: 733 | k8s-app: calico-kube-controllers 734 | strategy: 735 | type: Recreate 736 | template: 737 | metadata: 738 | name: calico-kube-controllers 739 | namespace: kube-system 740 | labels: 741 | k8s-app: calico-kube-controllers 742 | annotations: 743 | scheduler.alpha.kubernetes.io/critical-pod: '' 744 | spec: 745 | nodeSelector: 746 | beta.kubernetes.io/os: linux 747 | tolerations: 748 | # Mark the pod as a critical add-on for rescheduling. 
749 | - key: CriticalAddonsOnly 750 | operator: Exists 751 | - key: node-role.kubernetes.io/master 752 | effect: NoSchedule 753 | serviceAccountName: calico-kube-controllers 754 | priorityClassName: system-cluster-critical 755 | containers: 756 | - name: calico-kube-controllers 757 | image: calico/kube-controllers:v3.10.3 758 | env: 759 | # Choose which controllers to run. 760 | - name: ENABLED_CONTROLLERS 761 | value: node 762 | - name: DATASTORE_TYPE 763 | value: kubernetes 764 | readinessProbe: 765 | exec: 766 | command: 767 | - /usr/bin/check-status 768 | - -r 769 | 770 | --- 771 | 772 | apiVersion: v1 773 | kind: ServiceAccount 774 | metadata: 775 | name: calico-kube-controllers 776 | namespace: kube-system 777 | --- 778 | # Source: calico/templates/calico-etcd-secrets.yaml 779 | 780 | --- 781 | # Source: calico/templates/calico-typha.yaml 782 | 783 | --- 784 | # Source: calico/templates/configure-canal.yaml 785 | 786 | 787 | -------------------------------------------------------------------------------- /plugins/calico-v3.10.3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-config.yaml 3 | # This ConfigMap is used to configure a self-hosted Calico installation. 4 | kind: ConfigMap 5 | apiVersion: v1 6 | metadata: 7 | name: calico-config 8 | namespace: kube-system 9 | data: 10 | # Typha is disabled. 11 | typha_service_name: "none" 12 | # Configure the backend to use. 13 | calico_backend: "bird" 14 | 15 | # Configure the MTU to use 16 | veth_mtu: "1440" 17 | 18 | # The CNI network configuration to install on each node. The special 19 | # values in this config will be automatically populated. 20 | cni_network_config: |- 21 | { 22 | "name": "k8s-pod-network", 23 | "cniVersion": "0.3.1", 24 | "plugins": [ 25 | { 26 | "type": "calico", 27 | "log_level": "info", 28 | "datastore_type": "kubernetes", 29 | "nodename": "__KUBERNETES_NODE_NAME__", 30 | "mtu": __CNI_MTU__, 31 | "ipam": { 32 | "type": "calico-ipam" 33 | }, 34 | "policy": { 35 | "type": "k8s" 36 | }, 37 | "kubernetes": { 38 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 39 | } 40 | }, 41 | { 42 | "type": "portmap", 43 | "snat": true, 44 | "capabilities": {"portMappings": true} 45 | } 46 | ] 47 | } 48 | 49 | --- 50 | # Source: calico/templates/kdd-crds.yaml 51 | apiVersion: apiextensions.k8s.io/v1beta1 52 | kind: CustomResourceDefinition 53 | metadata: 54 | name: felixconfigurations.crd.projectcalico.org 55 | spec: 56 | scope: Cluster 57 | group: crd.projectcalico.org 58 | version: v1 59 | names: 60 | kind: FelixConfiguration 61 | plural: felixconfigurations 62 | singular: felixconfiguration 63 | --- 64 | 65 | apiVersion: apiextensions.k8s.io/v1beta1 66 | kind: CustomResourceDefinition 67 | metadata: 68 | name: ipamblocks.crd.projectcalico.org 69 | spec: 70 | scope: Cluster 71 | group: crd.projectcalico.org 72 | version: v1 73 | names: 74 | kind: IPAMBlock 75 | plural: ipamblocks 76 | singular: ipamblock 77 | 78 | --- 79 | 80 | apiVersion: apiextensions.k8s.io/v1beta1 81 | kind: CustomResourceDefinition 82 | metadata: 83 | name: blockaffinities.crd.projectcalico.org 84 | spec: 85 | scope: Cluster 86 | group: crd.projectcalico.org 87 | version: v1 88 | names: 89 | kind: BlockAffinity 90 | plural: blockaffinities 91 | singular: blockaffinity 92 | 93 | --- 94 | 95 | apiVersion: apiextensions.k8s.io/v1beta1 96 | kind: CustomResourceDefinition 97 | metadata: 98 | name: ipamhandles.crd.projectcalico.org 99 | spec: 100 | scope: Cluster 101 | group: 
crd.projectcalico.org 102 | version: v1 103 | names: 104 | kind: IPAMHandle 105 | plural: ipamhandles 106 | singular: ipamhandle 107 | 108 | --- 109 | 110 | apiVersion: apiextensions.k8s.io/v1beta1 111 | kind: CustomResourceDefinition 112 | metadata: 113 | name: ipamconfigs.crd.projectcalico.org 114 | spec: 115 | scope: Cluster 116 | group: crd.projectcalico.org 117 | version: v1 118 | names: 119 | kind: IPAMConfig 120 | plural: ipamconfigs 121 | singular: ipamconfig 122 | 123 | --- 124 | 125 | apiVersion: apiextensions.k8s.io/v1beta1 126 | kind: CustomResourceDefinition 127 | metadata: 128 | name: bgppeers.crd.projectcalico.org 129 | spec: 130 | scope: Cluster 131 | group: crd.projectcalico.org 132 | version: v1 133 | names: 134 | kind: BGPPeer 135 | plural: bgppeers 136 | singular: bgppeer 137 | 138 | --- 139 | 140 | apiVersion: apiextensions.k8s.io/v1beta1 141 | kind: CustomResourceDefinition 142 | metadata: 143 | name: bgpconfigurations.crd.projectcalico.org 144 | spec: 145 | scope: Cluster 146 | group: crd.projectcalico.org 147 | version: v1 148 | names: 149 | kind: BGPConfiguration 150 | plural: bgpconfigurations 151 | singular: bgpconfiguration 152 | 153 | --- 154 | 155 | apiVersion: apiextensions.k8s.io/v1beta1 156 | kind: CustomResourceDefinition 157 | metadata: 158 | name: ippools.crd.projectcalico.org 159 | spec: 160 | scope: Cluster 161 | group: crd.projectcalico.org 162 | version: v1 163 | names: 164 | kind: IPPool 165 | plural: ippools 166 | singular: ippool 167 | 168 | --- 169 | 170 | apiVersion: apiextensions.k8s.io/v1beta1 171 | kind: CustomResourceDefinition 172 | metadata: 173 | name: hostendpoints.crd.projectcalico.org 174 | spec: 175 | scope: Cluster 176 | group: crd.projectcalico.org 177 | version: v1 178 | names: 179 | kind: HostEndpoint 180 | plural: hostendpoints 181 | singular: hostendpoint 182 | 183 | --- 184 | 185 | apiVersion: apiextensions.k8s.io/v1beta1 186 | kind: CustomResourceDefinition 187 | metadata: 188 | name: clusterinformations.crd.projectcalico.org 189 | spec: 190 | scope: Cluster 191 | group: crd.projectcalico.org 192 | version: v1 193 | names: 194 | kind: ClusterInformation 195 | plural: clusterinformations 196 | singular: clusterinformation 197 | 198 | --- 199 | 200 | apiVersion: apiextensions.k8s.io/v1beta1 201 | kind: CustomResourceDefinition 202 | metadata: 203 | name: globalnetworkpolicies.crd.projectcalico.org 204 | spec: 205 | scope: Cluster 206 | group: crd.projectcalico.org 207 | version: v1 208 | names: 209 | kind: GlobalNetworkPolicy 210 | plural: globalnetworkpolicies 211 | singular: globalnetworkpolicy 212 | 213 | --- 214 | 215 | apiVersion: apiextensions.k8s.io/v1beta1 216 | kind: CustomResourceDefinition 217 | metadata: 218 | name: globalnetworksets.crd.projectcalico.org 219 | spec: 220 | scope: Cluster 221 | group: crd.projectcalico.org 222 | version: v1 223 | names: 224 | kind: GlobalNetworkSet 225 | plural: globalnetworksets 226 | singular: globalnetworkset 227 | 228 | --- 229 | 230 | apiVersion: apiextensions.k8s.io/v1beta1 231 | kind: CustomResourceDefinition 232 | metadata: 233 | name: networkpolicies.crd.projectcalico.org 234 | spec: 235 | scope: Namespaced 236 | group: crd.projectcalico.org 237 | version: v1 238 | names: 239 | kind: NetworkPolicy 240 | plural: networkpolicies 241 | singular: networkpolicy 242 | 243 | --- 244 | 245 | apiVersion: apiextensions.k8s.io/v1beta1 246 | kind: CustomResourceDefinition 247 | metadata: 248 | name: networksets.crd.projectcalico.org 249 | spec: 250 | scope: Namespaced 251 | group: 
crd.projectcalico.org 252 | version: v1 253 | names: 254 | kind: NetworkSet 255 | plural: networksets 256 | singular: networkset 257 | --- 258 | # Source: calico/templates/rbac.yaml 259 | 260 | # Include a clusterrole for the kube-controllers component, 261 | # and bind it to the calico-kube-controllers serviceaccount. 262 | kind: ClusterRole 263 | apiVersion: rbac.authorization.k8s.io/v1 264 | metadata: 265 | name: calico-kube-controllers 266 | rules: 267 | # Nodes are watched to monitor for deletions. 268 | - apiGroups: [""] 269 | resources: 270 | - nodes 271 | verbs: 272 | - watch 273 | - list 274 | - get 275 | # Pods are queried to check for existence. 276 | - apiGroups: [""] 277 | resources: 278 | - pods 279 | verbs: 280 | - get 281 | # IPAM resources are manipulated when nodes are deleted. 282 | - apiGroups: ["crd.projectcalico.org"] 283 | resources: 284 | - ippools 285 | verbs: 286 | - list 287 | - apiGroups: ["crd.projectcalico.org"] 288 | resources: 289 | - blockaffinities 290 | - ipamblocks 291 | - ipamhandles 292 | verbs: 293 | - get 294 | - list 295 | - create 296 | - update 297 | - delete 298 | # Needs access to update clusterinformations. 299 | - apiGroups: ["crd.projectcalico.org"] 300 | resources: 301 | - clusterinformations 302 | verbs: 303 | - get 304 | - create 305 | - update 306 | --- 307 | kind: ClusterRoleBinding 308 | apiVersion: rbac.authorization.k8s.io/v1 309 | metadata: 310 | name: calico-kube-controllers 311 | roleRef: 312 | apiGroup: rbac.authorization.k8s.io 313 | kind: ClusterRole 314 | name: calico-kube-controllers 315 | subjects: 316 | - kind: ServiceAccount 317 | name: calico-kube-controllers 318 | namespace: kube-system 319 | --- 320 | # Include a clusterrole for the calico-node DaemonSet, 321 | # and bind it to the calico-node serviceaccount. 322 | kind: ClusterRole 323 | apiVersion: rbac.authorization.k8s.io/v1 324 | metadata: 325 | name: calico-node 326 | rules: 327 | # The CNI plugin needs to get pods, nodes, and namespaces. 328 | - apiGroups: [""] 329 | resources: 330 | - pods 331 | - nodes 332 | - namespaces 333 | verbs: 334 | - get 335 | - apiGroups: [""] 336 | resources: 337 | - endpoints 338 | - services 339 | verbs: 340 | # Used to discover service IPs for advertisement. 341 | - watch 342 | - list 343 | # Used to discover Typhas. 344 | - get 345 | - apiGroups: [""] 346 | resources: 347 | - nodes/status 348 | verbs: 349 | # Needed for clearing NodeNetworkUnavailable flag. 350 | - patch 351 | # Calico stores some configuration information in node annotations. 352 | - update 353 | # Watch for changes to Kubernetes NetworkPolicies. 354 | - apiGroups: ["networking.k8s.io"] 355 | resources: 356 | - networkpolicies 357 | verbs: 358 | - watch 359 | - list 360 | # Used by Calico for policy information. 361 | - apiGroups: [""] 362 | resources: 363 | - pods 364 | - namespaces 365 | - serviceaccounts 366 | verbs: 367 | - list 368 | - watch 369 | # The CNI plugin patches pods/status. 370 | - apiGroups: [""] 371 | resources: 372 | - pods/status 373 | verbs: 374 | - patch 375 | # Calico monitors various CRDs for config. 
376 | - apiGroups: ["crd.projectcalico.org"] 377 | resources: 378 | - globalfelixconfigs 379 | - felixconfigurations 380 | - bgppeers 381 | - globalbgpconfigs 382 | - bgpconfigurations 383 | - ippools 384 | - ipamblocks 385 | - globalnetworkpolicies 386 | - globalnetworksets 387 | - networkpolicies 388 | - networksets 389 | - clusterinformations 390 | - hostendpoints 391 | - blockaffinities 392 | verbs: 393 | - get 394 | - list 395 | - watch 396 | # Calico must create and update some CRDs on startup. 397 | - apiGroups: ["crd.projectcalico.org"] 398 | resources: 399 | - ippools 400 | - felixconfigurations 401 | - clusterinformations 402 | verbs: 403 | - create 404 | - update 405 | # Calico stores some configuration information on the node. 406 | - apiGroups: [""] 407 | resources: 408 | - nodes 409 | verbs: 410 | - get 411 | - list 412 | - watch 413 | # These permissions are only requried for upgrade from v2.6, and can 414 | # be removed after upgrade or on fresh installations. 415 | - apiGroups: ["crd.projectcalico.org"] 416 | resources: 417 | - bgpconfigurations 418 | - bgppeers 419 | verbs: 420 | - create 421 | - update 422 | # These permissions are required for Calico CNI to perform IPAM allocations. 423 | - apiGroups: ["crd.projectcalico.org"] 424 | resources: 425 | - blockaffinities 426 | - ipamblocks 427 | - ipamhandles 428 | verbs: 429 | - get 430 | - list 431 | - create 432 | - update 433 | - delete 434 | - apiGroups: ["crd.projectcalico.org"] 435 | resources: 436 | - ipamconfigs 437 | verbs: 438 | - get 439 | # Block affinities must also be watchable by confd for route aggregation. 440 | - apiGroups: ["crd.projectcalico.org"] 441 | resources: 442 | - blockaffinities 443 | verbs: 444 | - watch 445 | # The Calico IPAM migration needs to get daemonsets. These permissions can be 446 | # removed if not upgrading from an installation using host-local IPAM. 447 | - apiGroups: ["apps"] 448 | resources: 449 | - daemonsets 450 | verbs: 451 | - get 452 | --- 453 | apiVersion: rbac.authorization.k8s.io/v1 454 | kind: ClusterRoleBinding 455 | metadata: 456 | name: calico-node 457 | roleRef: 458 | apiGroup: rbac.authorization.k8s.io 459 | kind: ClusterRole 460 | name: calico-node 461 | subjects: 462 | - kind: ServiceAccount 463 | name: calico-node 464 | namespace: kube-system 465 | 466 | --- 467 | # Source: calico/templates/calico-node.yaml 468 | # This manifest installs the calico-node container, as well 469 | # as the CNI plugins and network config on 470 | # each master and worker node in a Kubernetes cluster. 471 | kind: DaemonSet 472 | apiVersion: apps/v1 473 | metadata: 474 | name: calico-node 475 | namespace: kube-system 476 | labels: 477 | k8s-app: calico-node 478 | spec: 479 | selector: 480 | matchLabels: 481 | k8s-app: calico-node 482 | updateStrategy: 483 | type: RollingUpdate 484 | rollingUpdate: 485 | maxUnavailable: 1 486 | template: 487 | metadata: 488 | labels: 489 | k8s-app: calico-node 490 | annotations: 491 | # This, along with the CriticalAddonsOnly toleration below, 492 | # marks the pod as a critical add-on, ensuring it gets 493 | # priority scheduling and that its resources are reserved 494 | # if it ever gets evicted. 495 | scheduler.alpha.kubernetes.io/critical-pod: '' 496 | spec: 497 | nodeSelector: 498 | beta.kubernetes.io/os: linux 499 | hostNetwork: true 500 | tolerations: 501 | # Make sure calico-node gets scheduled on all nodes. 502 | - effect: NoSchedule 503 | operator: Exists 504 | # Mark the pod as a critical add-on for rescheduling. 
505 | - key: CriticalAddonsOnly 506 | operator: Exists 507 | - effect: NoExecute 508 | operator: Exists 509 | serviceAccountName: calico-node 510 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 511 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 512 | terminationGracePeriodSeconds: 0 513 | priorityClassName: system-node-critical 514 | initContainers: 515 | # This container performs upgrade from host-local IPAM to calico-ipam. 516 | # It can be deleted if this is a fresh installation, or if you have already 517 | # upgraded to use calico-ipam. 518 | - name: upgrade-ipam 519 | image: calico/cni:v3.10.3 520 | command: ["/opt/cni/bin/calico-ipam", "-upgrade"] 521 | env: 522 | - name: KUBERNETES_NODE_NAME 523 | valueFrom: 524 | fieldRef: 525 | fieldPath: spec.nodeName 526 | - name: CALICO_NETWORKING_BACKEND 527 | valueFrom: 528 | configMapKeyRef: 529 | name: calico-config 530 | key: calico_backend 531 | volumeMounts: 532 | - mountPath: /var/lib/cni/networks 533 | name: host-local-net-dir 534 | - mountPath: /host/opt/cni/bin 535 | name: cni-bin-dir 536 | # This container installs the CNI binaries 537 | # and CNI network config file on each node. 538 | - name: install-cni 539 | image: calico/cni:v3.10.3 540 | command: ["/install-cni.sh"] 541 | env: 542 | # Name of the CNI config file to create. 543 | - name: CNI_CONF_NAME 544 | value: "10-calico.conflist" 545 | # The CNI network config to install on each node. 546 | - name: CNI_NETWORK_CONFIG 547 | valueFrom: 548 | configMapKeyRef: 549 | name: calico-config 550 | key: cni_network_config 551 | # Set the hostname based on the k8s node name. 552 | - name: KUBERNETES_NODE_NAME 553 | valueFrom: 554 | fieldRef: 555 | fieldPath: spec.nodeName 556 | # CNI MTU Config variable 557 | - name: CNI_MTU 558 | valueFrom: 559 | configMapKeyRef: 560 | name: calico-config 561 | key: veth_mtu 562 | # Prevents the container from sleeping forever. 563 | - name: SLEEP 564 | value: "false" 565 | volumeMounts: 566 | - mountPath: /host/opt/cni/bin 567 | name: cni-bin-dir 568 | - mountPath: /host/etc/cni/net.d 569 | name: cni-net-dir 570 | # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes 571 | # to communicate with Felix over the Policy Sync API. 572 | - name: flexvol-driver 573 | image: calico/pod2daemon-flexvol:v3.10.3 574 | volumeMounts: 575 | - name: flexvol-driver-host 576 | mountPath: /host/driver 577 | containers: 578 | # Runs calico-node container on each Kubernetes node. This 579 | # container programs network policy and routes on each 580 | # host. 581 | - name: calico-node 582 | image: calico/node:v3.10.3 583 | env: 584 | # Use Kubernetes API as the backing datastore. 585 | - name: DATASTORE_TYPE 586 | value: "kubernetes" 587 | # Wait for the datastore. 588 | - name: WAIT_FOR_DATASTORE 589 | value: "true" 590 | # Set based on the k8s node name. 591 | - name: NODENAME 592 | valueFrom: 593 | fieldRef: 594 | fieldPath: spec.nodeName 595 | # Choose the backend to use. 596 | - name: CALICO_NETWORKING_BACKEND 597 | valueFrom: 598 | configMapKeyRef: 599 | name: calico-config 600 | key: calico_backend 601 | # Cluster type to identify the deployment type 602 | - name: CLUSTER_TYPE 603 | value: "k8s,bgp" 604 | # Auto-detect the BGP IP address. 
605 | - name: IP 606 | value: "autodetect" 607 | # Enable IPIP 608 | - name: CALICO_IPV4POOL_IPIP 609 | value: "Always" 610 | # Set MTU for tunnel device used if ipip is enabled 611 | - name: FELIX_IPINIPMTU 612 | valueFrom: 613 | configMapKeyRef: 614 | name: calico-config 615 | key: veth_mtu 616 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 617 | # chosen from this range. Changing this value after installation will have 618 | # no effect. This should fall within `--cluster-cidr`. 619 | - name: CALICO_IPV4POOL_CIDR 620 | value: "192.168.0.0/16" 621 | # Disable file logging so `kubectl logs` works. 622 | - name: CALICO_DISABLE_FILE_LOGGING 623 | value: "true" 624 | # Set Felix endpoint to host default action to ACCEPT. 625 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 626 | value: "ACCEPT" 627 | # Disable IPv6 on Kubernetes. 628 | - name: FELIX_IPV6SUPPORT 629 | value: "false" 630 | # Set Felix logging to "info" 631 | - name: FELIX_LOGSEVERITYSCREEN 632 | value: "info" 633 | - name: FELIX_HEALTHENABLED 634 | value: "true" 635 | securityContext: 636 | privileged: true 637 | resources: 638 | requests: 639 | cpu: 250m 640 | livenessProbe: 641 | exec: 642 | command: 643 | - /bin/calico-node 644 | - -felix-live 645 | - -bird-live 646 | periodSeconds: 10 647 | initialDelaySeconds: 10 648 | failureThreshold: 6 649 | readinessProbe: 650 | exec: 651 | command: 652 | - /bin/calico-node 653 | - -felix-ready 654 | - -bird-ready 655 | periodSeconds: 10 656 | volumeMounts: 657 | - mountPath: /lib/modules 658 | name: lib-modules 659 | readOnly: true 660 | - mountPath: /run/xtables.lock 661 | name: xtables-lock 662 | readOnly: false 663 | - mountPath: /var/run/calico 664 | name: var-run-calico 665 | readOnly: false 666 | - mountPath: /var/lib/calico 667 | name: var-lib-calico 668 | readOnly: false 669 | - name: policysync 670 | mountPath: /var/run/nodeagent 671 | volumes: 672 | # Used by calico-node. 673 | - name: lib-modules 674 | hostPath: 675 | path: /lib/modules 676 | - name: var-run-calico 677 | hostPath: 678 | path: /var/run/calico 679 | - name: var-lib-calico 680 | hostPath: 681 | path: /var/lib/calico 682 | - name: xtables-lock 683 | hostPath: 684 | path: /run/xtables.lock 685 | type: FileOrCreate 686 | # Used to install CNI. 687 | - name: cni-bin-dir 688 | hostPath: 689 | path: /opt/cni/bin 690 | - name: cni-net-dir 691 | hostPath: 692 | path: /etc/cni/net.d 693 | # Mount in the directory for host-local IPAM allocations. This is 694 | # used when upgrading from host-local to calico-ipam, and can be removed 695 | # if not using the upgrade-ipam init container. 
696 | - name: host-local-net-dir 697 | hostPath: 698 | path: /var/lib/cni/networks 699 | # Used to create per-pod Unix Domain Sockets 700 | - name: policysync 701 | hostPath: 702 | type: DirectoryOrCreate 703 | path: /var/run/nodeagent 704 | # Used to install Flex Volume Driver 705 | - name: flexvol-driver-host 706 | hostPath: 707 | type: DirectoryOrCreate 708 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds 709 | --- 710 | 711 | apiVersion: v1 712 | kind: ServiceAccount 713 | metadata: 714 | name: calico-node 715 | namespace: kube-system 716 | 717 | --- 718 | # Source: calico/templates/calico-kube-controllers.yaml 719 | 720 | # See https://github.com/projectcalico/kube-controllers 721 | apiVersion: apps/v1 722 | kind: Deployment 723 | metadata: 724 | name: calico-kube-controllers 725 | namespace: kube-system 726 | labels: 727 | k8s-app: calico-kube-controllers 728 | spec: 729 | # The controllers can only have a single active instance. 730 | replicas: 1 731 | selector: 732 | matchLabels: 733 | k8s-app: calico-kube-controllers 734 | strategy: 735 | type: Recreate 736 | template: 737 | metadata: 738 | name: calico-kube-controllers 739 | namespace: kube-system 740 | labels: 741 | k8s-app: calico-kube-controllers 742 | annotations: 743 | scheduler.alpha.kubernetes.io/critical-pod: '' 744 | spec: 745 | nodeSelector: 746 | beta.kubernetes.io/os: linux 747 | tolerations: 748 | # Mark the pod as a critical add-on for rescheduling. 749 | - key: CriticalAddonsOnly 750 | operator: Exists 751 | - key: node-role.kubernetes.io/master 752 | effect: NoSchedule 753 | serviceAccountName: calico-kube-controllers 754 | priorityClassName: system-cluster-critical 755 | containers: 756 | - name: calico-kube-controllers 757 | image: calico/kube-controllers:v3.10.3 758 | env: 759 | # Choose which controllers to run. 760 | - name: ENABLED_CONTROLLERS 761 | value: node 762 | - name: DATASTORE_TYPE 763 | value: kubernetes 764 | readinessProbe: 765 | exec: 766 | command: 767 | - /usr/bin/check-status 768 | - -r 769 | 770 | --- 771 | 772 | apiVersion: v1 773 | kind: ServiceAccount 774 | metadata: 775 | name: calico-kube-controllers 776 | namespace: kube-system 777 | --- 778 | # Source: calico/templates/calico-etcd-secrets.yaml 779 | 780 | --- 781 | # Source: calico/templates/calico-typha.yaml 782 | 783 | --- 784 | # Source: calico/templates/configure-canal.yaml 785 | 786 | 787 | --------------------------------------------------------------------------------
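Note that the Calico manifest dumped first sets `CALICO_IPV4POOL_CIDR` to `10.11.10.0/16`, while `plugins/calico-v3.10.3.yaml` keeps the upstream default `192.168.0.0/16`; as the manifest comments state, this pool must fall within the cluster's pod CIDR and changing it after installation has no effect. Below is a minimal sketch of applying one of these manifests and waiting for Calico to become ready. This is an assumed usage example, not one of the repository's scripts; it presumes `kubectl` is already configured against the freshly initialized cluster and that the `calico/*:v3.10.3` images have been loaded onto every node from the offline image set.

```bash
#!/bin/bash
# Hypothetical usage sketch: deploy Calico from the offline manifest and
# verify that the DaemonSet and controllers come up.
set -e

# Apply whichever copy of the manifest matches the cluster's pod CIDR
# (edit CALICO_IPV4POOL_CIDR first if your podSubnet differs).
kubectl apply -f plugins/calico-v3.10.3.yaml

# Wait for the calico-node DaemonSet and the calico-kube-controllers
# Deployment in kube-system to finish rolling out.
kubectl -n kube-system rollout status daemonset/calico-node --timeout=300s
kubectl -n kube-system rollout status deployment/calico-kube-controllers --timeout=300s

# Nodes should report Ready once the CNI binaries and config are installed.
kubectl get nodes -o wide
```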