├── LICENSE
├── README.md
├── bootstrap
└── bootstrap.secret.yaml
├── calico
├── calico-ipv6.yaml
├── calico-typha.yaml
└── calico.yaml
├── cilium-test
├── connectivity-check.yaml
└── monitoring-example.yaml
├── dashboard
└── dashboard-user.yaml
├── doc
├── Enable-implement-IPv4-IPv6.md
├── Kubernetes_docker.md
├── Minikube_init.md
├── Upgrade_Kubernetes.md
├── kube-proxy_permissions.md
├── kubeadm-install-IPV6-IPV4.md
├── kubeadm-install-V1.32.md
├── kubeadm-install.md
├── kubernetes_install_cilium.md
├── v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.23.3-CentOS-binary-install.md
├── v1.23.4-CentOS-binary-install.md
├── v1.23.5-CentOS-binary-install.md
├── v1.23.6-CentOS-binary-install.md
├── v1.23.7-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.24.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.24.0-CentOS-binary-install-IPv6-IPv4.md
├── v1.24.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.24.1-CentOS-binary-install-IPv6-IPv4.md
├── v1.24.1-Ubuntu-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.24.2-CentOS-binary-install-IPv6-IPv4.md
├── v1.24.3-CentOS-binary-install-IPv6-IPv4.md
├── v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.25.0-CentOS-binary-install-IPv6-IPv4.md
├── v1.25.4-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.26.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md
├── v1.26.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.28.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.29.2-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.30.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.30.2-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── v1.31.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
└── v1.32.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md
├── images
├── 1.jpg
├── 2.jpg
└── 3.jpg
├── ingress-yaml
├── backend.yaml
├── deploy.yaml
└── ingress-demo-app.yaml
├── metrics-server
├── high-availability.yaml
└── metrics-server-components.yaml
├── pki
├── admin-csr.json
├── apiserver-csr.json
├── ca-config.json
├── ca-csr.json
├── etcd-ca-csr.json
├── etcd-csr.json
├── front-proxy-ca-csr.json
├── front-proxy-client-csr.json
├── kube-proxy-csr.json
├── kubelet-csr.json
├── manager-csr.json
└── scheduler-csr.json
├── shell
├── download.sh
└── update_k8s.sh
└── yaml
├── PHP-Nginx-Deployment-ConfMap-Service.yaml
├── admin.yaml
├── calico.yaml
├── cby.yaml
├── connectivity-check.yaml
├── dashboard-user.yaml
├── deploy.yaml
├── metrics-server-components.yaml
├── monitoring-example.yaml
├── mysql-ha-read-write-separation.yaml
├── nfs-storage.yaml
└── vscode.yaml
/README.md:
--------------------------------------------------------------------------------
1 | # kubernetes (k8s) 二进制高可用安装
2 |
3 | [Kubernetes](https://github.com/cby-chen/Kubernetes) 开源不易,帮忙点个star,谢谢了🌹
4 |
5 | GitHub访问不通畅可以访问国内GitEE https://gitee.com/cby-inc/Kubernetes
6 |
7 | ## ⚠️ 中国大陆地区 Docker 以及其他仓库镜像已被 GFW 完全封锁!!!
8 | ## ⚠️ 现镜像已无法直接拉取,需要通过一些特殊方式进行!!!
9 |
10 | # 一、写在前面
11 |
12 | 打开文档,使用全文替换,全局替换主机IP即可。
13 |
14 | 各位仁兄,手动方式部署,没必要折磨自己。
15 |
16 | 能用自动部署就用自动部署吧。推荐:https://github.com/easzlab/kubeasz
17 |
18 | 也可以用 [kubeadm-install.md](./doc/kubeadm-install-V1.32.md)
19 |
20 | # 二、介绍
21 |
22 | 我使用IPV6的目的是在公网进行访问,所以我配置了IPV6静态地址。
23 | 若您没有IPV6环境,或者不想使用IPv6,不对主机进行配置IPv6地址即可。
24 | 不配置IPV6,不影响后续,不过集群依旧是支持IPv6的。为后期留有扩展可能性。
25 | 若不要IPv6 ,不给网卡配置IPv6即可,不要对IPv6相关配置删除或操作,否则会出问题。
26 | 如果本地没有IPv6,那么Calico需要使用IPv4的yaml配置文件。
27 | 后续尽可能第一时间更新新版本文档,更新后内容在GitHub。
28 |
29 | 最新版本的文档对于IPv6方面的写的比较详细。
30 |
31 | 不要删除 IPv6 相关配置!!!
32 |
33 | 不要删除 IPv6 相关配置!!!
34 |
35 | 不要删除 IPv6 相关配置!!!
36 |
37 | # 三、当前文档版本
38 |
39 | - 1.32.x
40 | - 1.31.x
41 | - 1.30.x
42 | - 1.29.x
43 | - 1.28.x
44 | - 1.27.x
45 | - 1.26.x
46 | - 1.25.x
47 | - 1.24.x
48 | - 1.23.x
49 | - 1.22.x
50 | - 1.21.x
51 |
52 | 大版本之间是通用的,比如使用 1.26.0 的文档可以安装 1.26.x 各种版本,只是安装过程中的下载新的包即可。
53 |
54 | # 四、访问地址
55 |
56 | 手动项目地址:
57 | https://github.com/cby-chen/Kubernetes
58 |
59 | 脚本项目地址(已停更):
60 | https://github.com/cby-chen/Binary_installation_of_Kubernetes
61 | https://github.com/cby-chen/kube_ansible
62 |
63 | # 五、文档
64 |
65 | ### 最新版本文档
66 | - [kubeadm-install.md](./doc/kubeadm-install-V1.32.md)
67 | - [v1.32.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.32.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
68 |
69 | ## 安装文档
70 | ### 1.32.x版本
71 | - [v1.32.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.32.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
72 |
73 | ### 1.31.x版本
74 | - [v1.31.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.31.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
75 |
76 | ### 1.30.x版本
77 | - [v1.30.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.30.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
78 | - [v1.30.2-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.30.2-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
79 |
80 | ### 1.29.x版本
81 | - [v1.29.2-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.29.2-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
82 |
83 | ### 1.28.x版本
84 | - [v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.28.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
85 | - [v1.28.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.28.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
86 |
87 | ### 1.27.x版本
88 | - [v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.27.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
89 | - [v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.27.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
90 |
91 | ### 1.26.x版本
92 | - [v1.26.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md](./doc/v1.26.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md)
93 | - [v1.26.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md](./doc/v1.26.1-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md)
94 |
95 | ### 1.25.x版本
96 | - [v1.25.0-CentOS-binary-install-IPv6-IPv4.md](./doc/v1.25.0-CentOS-binary-install-IPv6-IPv4.md)
97 | - [v1.25.4-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md](./doc/v1.25.4-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md)
98 |
99 | ### 1.24.x版本
100 | - [v1.24.0-CentOS-binary-install-IPv6-IPv4.md](./doc/v1.24.0-CentOS-binary-install-IPv6-IPv4.md)
101 | - [v1.24.1-CentOS-binary-install-IPv6-IPv4.md](./doc/v1.24.1-CentOS-binary-install-IPv6-IPv4.md)
102 | - [v1.24.2-CentOS-binary-install-IPv6-IPv4.md](./doc/v1.24.2-CentOS-binary-install-IPv6-IPv4.md)
103 | - [v1.24.3-CentOS-binary-install-IPv6-IPv4.md](./doc/v1.24.3-CentOS-binary-install-IPv6-IPv4.md)
104 |
105 | ### 1.23.x版本
106 | - [v1.23.3-CentOS-binary-install](./doc/v1.23.3-CentOS-binary-install.md)
107 | - [v1.23.4-CentOS-binary-install](./doc/v1.23.4-CentOS-binary-install.md)
108 | - [v1.23.5-CentOS-binary-install](./doc/v1.23.5-CentOS-binary-install.md)
109 | - [v1.23.6-CentOS-binary-install](./doc/v1.23.6-CentOS-binary-install.md)
110 |
111 | ### 1.22.x版本
112 | - [v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md](./doc/v1.22.10-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md)
113 |
114 | ### 1.21.x版本
115 | - [v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md](./doc/v1.21.13-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md)
116 |
117 | ## 其他文档
118 | - 修复kube-proxy证书权限过大问题 [kube-proxy_permissions.md](./doc/kube-proxy_permissions.md)
119 | - 使用kubeadm初始化IPV4/IPV6集群 [kubeadm-install-IPV6-IPV4.md](./doc/kubeadm-install-IPV6-IPV4.md)
120 | - IPv4集群启用IPv6功能,关闭IPv6则反之 [Enable-implement-IPv4-IPv6.md](./doc/Enable-implement-IPv4-IPv6.md)
121 | - 升级kubernetes集群 [Upgrade_Kubernetes.md](./doc/Upgrade_Kubernetes.md)
122 | - Minikube初始化集群 [Minikube_init.md](./doc/Minikube_init.md)
123 | - Kubernetes 1.24 1.25 集群使用docker作为容器 [Kubernetes_docker](./doc/Kubernetes_docker.md)
124 | - kubernetes 安装cilium [kubernetes_install_cilium](./doc/kubernetes_install_cilium.md)
125 | - 二进制安装每个版本文档
126 |
127 | # 六、安装包
128 |
129 | - 123云盘 https://www.123pan.com/s/Z8ArVv-PG60d
130 | - 夸克云盘 https://pan.quark.cn/s/8d8525a12895
131 |
132 | - https://github.com/cby-chen/Kubernetes/releases
133 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.22.10/kubernetes-v1.22.10.tar
134 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.21.13/kubernetes-v1.21.13.tar
135 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/cby/Kubernetes.tar
136 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.23.4/kubernetes-v1.23.4.tar
137 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.23.5/kubernetes-v1.23.5.tar
138 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.23.6/kubernetes-v1.23.6.tar
139 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.23.7/kubernetes-v1.23.7.tar
140 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.24.0/kubernetes-v1.24.0.tar
141 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.24.1/kubernetes-v1.24.1.tar
142 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.24.2/kubernetes-v1.24.2.tar
143 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.24.3/kubernetes-v1.24.3.tar
144 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.25.0/kubernetes-v1.25.0.tar
145 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.25.4/kubernetes-v1.25.4.tar
146 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.26.0/kubernetes-v1.26.0.tar
147 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.26.1/kubernetes-v1.26.1.tar
148 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.27.1/kubernetes-v1.27.1.tar
149 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.27.3/kubernetes-v1.27.3.tar
150 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.28.0/kubernetes-v1.28.0.tar
151 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.28.3/kubernetes-v1.28.3.tar
152 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.29.2/kubernetes-v1.29.2.tar
153 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.30.1/kubernetes-v1.30.1.tar
154 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.30.2/kubernetes-v1.30.2.tar
155 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.31.1/kubernetes-v1.31.1.tar
156 | - wget https://mirrors.chenby.cn/https://github.com/cby-chen/Kubernetes/releases/download/v1.32.0/kubernetes-v1.32.0.tar
157 |
158 | *注意:1.23.3 版本当时没想到会后续更新,所以当时命名不太规范。
159 |
160 |
161 | # 八、常见异常
162 |
163 | - 注意hosts配置文件中主机名和IP地址对应
164 |
165 | - 在文档7.2,切记别忘记执行`kubectl create -f bootstrap.secret.yaml`命令
166 |
167 | - 重启服务器之后出现异常,可以查看`systemctl status kubelet.service`服务是否正常
168 |
169 | - 在 centos7 环境下需要升级 runc 和 libseccomp
170 | 详见 https://github.com/cby-chen/Kubernetes/blob/main/doc/v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md#9%E5%AE%89%E8%A3%85%E7%BD%91%E7%BB%9C%E6%8F%92%E4%BB%B6
171 |
172 | - 安装会出现kubelet异常,无法识别 `--node-labels` 字段问题,原因如下。
173 | 将 `--node-labels=node.kubernetes.io/node=''` 替换为 `--node-labels=node.kubernetes.io/node=` 将 `''` 删除即可。
174 |
175 | - IPv6无法正常访问,kubelet服务需要添加`--node-ip=`参数,若动态获取IP地址变动之后需要重新配置,详细查看文档 https://github.com/cby-chen/Kubernetes/blob/main/doc/v1.28.3-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves-Offline.md#82kubelet%E9%85%8D%E7%BD%AE
176 |
177 | # 九、其他
178 |
179 | ## 生产环境推荐配置
180 |
181 | ### Master节点:
182 | - 三个节点实现高可用(必须)
183 | - 节点数:0-100 8核16G+
184 | - 节点数:100-250 8核32G+
185 | - 节点数:250-500 16核32G+
186 |
187 | ### etcd节点:
188 | - 三个节点实现高可用(必须),有条件存储分区必须高性能SSD硬盘,没有SSD也要有高效独立磁盘
189 | - 节点数:0-50 2核8G+ 50G SSD存储
190 | - 节点数:50-250 4核16G+ 150G SSD存储
191 | - 节点数:250-1000 8核32G+ 250G SSD存储
192 |
193 | ### Node节点:
194 | - 无特殊要求,主要是Docker数据分区、系统分区需要单独使用,不可以使用同一个磁盘,系统分区100G+、Docker数据分区200G+,有条件使用SSD硬盘,必须独立于系统盘
195 |
196 | ### 其他:
197 | - 集群规模不大可以将etcd和master放置于同一个宿主机,
198 | - 也就是每个master节点部署k8s组件和etcd服务,但是etcd的数据目录一定要独立,并且使用SSD,
199 | - 两者部署在一起需要相对增加宿主机的资源,个人建议生产环境把master节点的资源一次性给够,
200 | - 此处的费用不应该节省,可以直接使用16核32G或者64G的机器,之后集群扩容就无需扩容master节点的资源,减少风险。
201 | - 其中master节点和etcd节点的系统分区100G即可。
202 |
203 | ### 添加好友
204 |
205 |
206 | ### 打赏
207 |
208 |
209 | - 建议在 [Kubernetes](https://github.com/cby-chen/Kubernetes) 查看文档,后续会陆续更新文档
210 | - 小陈网站:
211 |
212 | > https://www.oiox.cn/
213 | >
214 | > https://www.oiox.cn/index.php/start-page.html
215 | >
216 | > **CSDN、GitHub、51CTO、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
217 | >
218 | > **全网可搜《小陈运维》**
219 | >
220 | > **文章主要发布于微信公众号**
221 |
222 |
223 | ## Stargazers over time
224 |
225 | [](https://starchart.cc/cby-chen/Kubernetes)
226 |
--------------------------------------------------------------------------------
/bootstrap/bootstrap.secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 |   name: bootstrap-token-c8ad9c  # must follow the "bootstrap-token-<token-id>" naming convention
5 |   namespace: kube-system
6 | type: bootstrap.kubernetes.io/token
7 | stringData:
8 |   description: "The default bootstrap token generated by 'kubelet '."  # NOTE(review): trailing space inside 'kubelet ' looks accidental; upstream template reads 'kubeadm init' — confirm before changing
9 |   token-id: c8ad9c  # public half; full bootstrap token is "<token-id>.<token-secret>"
10 |   token-secret: 2e4d610cf3e7426e  # secret half of the bootstrap token
11 |   usage-bootstrap-authentication: "true"  # token may be used to authenticate to the API server
12 |   usage-bootstrap-signing: "true"  # token may be used to sign the cluster-info ConfigMap
13 |   auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress  # extra groups granted to users of this token
14 | 
15 | ---
16 | apiVersion: rbac.authorization.k8s.io/v1
17 | kind: ClusterRoleBinding
18 | metadata:
19 |   name: kubelet-bootstrap  # lets the bootstrap-token group create node CSRs
20 | roleRef:
21 |   apiGroup: rbac.authorization.k8s.io
22 |   kind: ClusterRole
23 |   name: system:node-bootstrapper
24 | subjects:
25 | - apiGroup: rbac.authorization.k8s.io
26 |   kind: Group
27 |   name: system:bootstrappers:default-node-token
28 | ---
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | kind: ClusterRoleBinding
31 | metadata:
32 |   name: node-autoapprove-bootstrap  # auto-approve the initial kubelet client CSR
33 | roleRef:
34 |   apiGroup: rbac.authorization.k8s.io
35 |   kind: ClusterRole
36 |   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
37 | subjects:
38 | - apiGroup: rbac.authorization.k8s.io
39 |   kind: Group
40 |   name: system:bootstrappers:default-node-token
41 | ---
42 | apiVersion: rbac.authorization.k8s.io/v1
43 | kind: ClusterRoleBinding
44 | metadata:
45 |   name: node-autoapprove-certificate-rotation  # auto-approve kubelet certificate renewal CSRs
46 | roleRef:
47 |   apiGroup: rbac.authorization.k8s.io
48 |   kind: ClusterRole
49 |   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
50 | subjects:
51 | - apiGroup: rbac.authorization.k8s.io
52 |   kind: Group
53 |   name: system:nodes
54 | ---
55 | apiVersion: rbac.authorization.k8s.io/v1
56 | kind: ClusterRole
57 | metadata:
58 |   annotations:
59 |     rbac.authorization.kubernetes.io/autoupdate: "true"
60 |   labels:
61 |     kubernetes.io/bootstrapping: rbac-defaults
62 |   name: system:kube-apiserver-to-kubelet  # grants the apiserver access to kubelet sub-resources (logs/exec/metrics)
63 | rules:
64 |   - apiGroups:
65 |   - ""
66 |   resources:
67 |   - nodes/proxy
68 |   - nodes/stats
69 |   - nodes/log
70 |   - nodes/spec
71 |   - nodes/metrics
72 |   verbs:
73 |   - "*"
74 | ---
75 | apiVersion: rbac.authorization.k8s.io/v1
76 | kind: ClusterRoleBinding
77 | metadata:
78 |   name: system:kube-apiserver
79 |   namespace: ""
80 | roleRef:
81 |   apiGroup: rbac.authorization.k8s.io
82 |   kind: ClusterRole
83 |   name: system:kube-apiserver-to-kubelet
84 | subjects:
85 | - apiGroup: rbac.authorization.k8s.io
86 |   kind: User
87 |   name: kube-apiserver  # presumably the CN in the apiserver's kubelet client certificate — verify against pki/apiserver-csr.json
88 |
--------------------------------------------------------------------------------
/dashboard/dashboard-user.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 |   name: admin-user  # ServiceAccount whose token is used to log in to the Kubernetes Dashboard
5 |   namespace: kube-system
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRoleBinding
9 | metadata:
10 |   name: admin-user
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: cluster-admin  # full cluster-admin rights; grant only to trusted operators
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: admin-user
18 |   namespace: kube-system
19 |
--------------------------------------------------------------------------------
/doc/Enable-implement-IPv4-IPv6.md:
--------------------------------------------------------------------------------
1 | 背景
2 | ==
3 |
4 | 如今IPv4IP地址已经使用完毕,未来全球会以IPv6地址为中心,会大力发展IPv6网络环境,由于IPv6可以实现给任何一个设备分配到公网IP,所以资源是非常丰富的。
5 |
6 |
7 | 配置hosts
8 | =======
9 |
10 | ```shell
11 | [root@k8s-master01 ~]# vim /etc/hosts
12 | [root@k8s-master01 ~]# cat /etc/hosts
13 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
14 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
15 | 2408:8207:78ce:7561::10 k8s-master01
16 | 2408:8207:78ce:7561::20 k8s-master02
17 | 2408:8207:78ce:7561::30 k8s-master03
18 | 2408:8207:78ce:7561::40 k8s-node01
19 | 2408:8207:78ce:7561::50 k8s-node02
20 | 2408:8207:78ce:7561::60 k8s-node03
21 | 2408:8207:78ce:7561::70 k8s-node04
22 | 2408:8207:78ce:7561::80 k8s-node05
23 |
24 | 10.0.0.81 k8s-master01
25 | 10.0.0.82 k8s-master02
26 | 10.0.0.83 k8s-master03
27 | 10.0.0.84 k8s-node01
28 | 10.0.0.85 k8s-node02
29 | 10.0.0.86 k8s-node03
30 | 10.0.0.87 k8s-node04
31 | 10.0.0.88 k8s-node05
32 | 10.0.0.80 lb01
33 | 10.0.0.90 lb02
34 | 10.0.0.99 lb-vip
35 |
36 | [root@k8s-master01 ~]#
37 |
38 | ```
39 |
40 | 配置ipv6地址
41 | ========
42 |
43 | ```shell
44 | [root@k8s-master01 ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens160
45 | [root@k8s-master01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens160
46 | TYPE=Ethernet
47 | PROXY_METHOD=none
48 | BROWSER_ONLY=no
49 | BOOTPROTO=none
50 | DEFROUTE=yes
51 | IPV4_FAILURE_FATAL=no
52 | IPV6INIT=yes
53 | IPV6_AUTOCONF=no
54 | IPV6ADDR=2408:8207:78ce:7561::10/64
55 | IPV6_DEFAULTGW=2408:8207:78ce:7561::1
56 | IPV6_DEFROUTE=yes
57 | IPV6_FAILURE_FATAL=no
58 | NAME=ens160
59 | UUID=56ca7c8c-21c6-484f-acbd-349111b3ddb5
60 | DEVICE=ens160
61 | ONBOOT=yes
62 | IPADDR=10.0.0.81
63 | PREFIX=24
64 | GATEWAY=10.0.0.1
65 | DNS1=8.8.8.8
66 | DNS2=2408:8000:1010:1::8
67 | [root@k8s-master01 ~]#
68 |
69 | ```
70 |
71 | 注意:每一台主机都需要配置为静态IPv6地址!若不进行配置,在内核中开启IPv6数据包转发功能后会出现IPv6异常。
72 |
73 | sysctl参数启用ipv6
74 | ==============
75 |
76 | ```shell
77 | [root@k8s-master01 ~]# vim /etc/sysctl.d/k8s.conf
78 | [root@k8s-master01 ~]# cat /etc/sysctl.d/k8s.conf
79 | net.ipv4.ip_forward = 1
80 | net.bridge.bridge-nf-call-iptables = 1
81 | fs.may_detach_mounts = 1
82 | vm.overcommit_memory=1
83 | vm.panic_on_oom=0
84 | fs.inotify.max_user_watches=89100
85 | fs.file-max=52706963
86 | fs.nr_open=52706963
87 | net.netfilter.nf_conntrack_max=2310720
88 |
89 |
90 | net.ipv4.tcp_keepalive_time = 600
91 | net.ipv4.tcp_keepalive_probes = 3
92 | net.ipv4.tcp_keepalive_intvl =15
93 | net.ipv4.tcp_max_tw_buckets = 36000
94 | net.ipv4.tcp_tw_reuse = 1
95 | net.ipv4.tcp_max_orphans = 327680
96 | net.ipv4.tcp_orphan_retries = 3
97 | net.ipv4.tcp_syncookies = 1
98 | net.ipv4.tcp_max_syn_backlog = 16384
99 | net.ipv4.ip_conntrack_max = 65536
100 | net.ipv4.tcp_max_syn_backlog = 16384
101 | net.ipv4.tcp_timestamps = 0
102 | net.core.somaxconn = 16384
103 |
104 |
105 | net.ipv6.conf.all.disable_ipv6 = 0
106 | net.ipv6.conf.default.disable_ipv6 = 0
107 | net.ipv6.conf.lo.disable_ipv6 = 0
108 | net.ipv6.conf.all.forwarding = 0
109 |
110 | [root@k8s-master01 ~]#
111 | [root@k8s-master01 ~]# reboot
112 |
113 | ```
114 |
115 | 测试访问公网IPv6
116 | ==========
117 |
118 | ```shell
119 | [root@k8s-master01 ~]# ping www.chenby.cn -6
120 | PING www.chenby.cn(2408:871a:5100:119:1d:: (2408:871a:5100:119:1d::)) 56 data bytes
121 | 64 bytes from 2408:871a:5100:119:1d:: (2408:871a:5100:119:1d::): icmp_seq=1 ttl=53 time=10.6 ms
122 | 64 bytes from 2408:871a:5100:119:1d:: (2408:871a:5100:119:1d::): icmp_seq=2 ttl=53 time=9.94 ms
123 | ^C
124 | --- www.chenby.cn ping statistics ---
125 | 2 packets transmitted, 2 received, 0% packet loss, time 1002ms
126 | rtt min/avg/max/mdev = 9.937/10.269/10.602/0.347 ms
127 | [root@k8s-master01 ~]#
128 |
129 | ```
130 |
131 | 修改kube-apiserver如下配置
132 | ====================
133 |
134 | ```shell
135 | --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112
136 | --feature-gates=IPv6DualStack=true
137 |
138 | [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
139 | [root@k8s-master01 ~]# cat /usr/lib/systemd/system/kube-apiserver.service
140 |
141 | [Unit]
142 | Description=Kubernetes API Server
143 | Documentation=https://github.com/kubernetes/kubernetes
144 | After=network.target
145 |
146 | [Service]
147 | ExecStart=/usr/local/bin/kube-apiserver \
148 | --v=2 \
149 | --logtostderr=true \
150 | --allow-privileged=true \
151 | --bind-address=0.0.0.0 \
152 | --secure-port=6443 \
153 | --insecure-port=0 \
154 | --advertise-address=192.168.1.81 \
155 | --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \
156 | --feature-gates=IPv6DualStack=true \
157 | --service-node-port-range=30000-32767 \
158 | --etcd-servers=https://192.168.1.81:2379,https://192.168.1.82:2379,https://192.168.1.83:2379 \
159 | --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
160 | --etcd-certfile=/etc/etcd/ssl/etcd.pem \
161 | --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
162 | --client-ca-file=/etc/kubernetes/pki/ca.pem \
163 | --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
164 | --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
165 | --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
166 | --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
167 | --service-account-key-file=/etc/kubernetes/pki/sa.pub \
168 | --service-account-signing-key-file=/etc/kubernetes/pki/sa.key \
169 | --service-account-issuer=https://kubernetes.default.svc.cluster.local \
170 | --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
171 | --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
172 | --authorization-mode=Node,RBAC \
173 | --enable-bootstrap-token-auth=true \
174 | --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
175 | --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
176 | --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
177 | --requestheader-allowed-names=aggregator \
178 | --requestheader-group-headers=X-Remote-Group \
179 | --requestheader-extra-headers-prefix=X-Remote-Extra- \
180 | --requestheader-username-headers=X-Remote-User \
181 | --enable-aggregator-routing=true
182 | # --token-auth-file=/etc/kubernetes/token.csv
183 |
184 | Restart=on-failure
185 | RestartSec=10s
186 | LimitNOFILE=65535
187 |
188 | [Install]
189 | WantedBy=multi-user.target
190 |
191 | ```
192 |
193 | 修改kube-controller-manager如下配置
194 | ====================
195 |
196 | ```shell
197 | --feature-gates=IPv6DualStack=true
198 | --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112
199 | --cluster-cidr=172.16.0.0/12,fc00:2222::/112
200 | --node-cidr-mask-size-ipv4=24
201 | --node-cidr-mask-size-ipv6=120
202 |
203 | [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
204 | [root@k8s-master01 ~]# cat /usr/lib/systemd/system/kube-controller-manager.service
205 | [Unit]
206 | Description=Kubernetes Controller Manager
207 | Documentation=https://github.com/kubernetes/kubernetes
208 | After=network.target
209 |
210 | [Service]
211 | ExecStart=/usr/local/bin/kube-controller-manager \
212 | --v=2 \
213 | --logtostderr=true \
214 | --address=127.0.0.1 \
215 | --root-ca-file=/etc/kubernetes/pki/ca.pem \
216 | --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
217 | --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
218 | --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
219 | --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
220 | --leader-elect=true \
221 | --use-service-account-credentials=true \
222 | --node-monitor-grace-period=40s \
223 | --node-monitor-period=5s \
224 | --pod-eviction-timeout=2m0s \
225 | --controllers=*,bootstrapsigner,tokencleaner \
226 | --allocate-node-cidrs=true \
227 | --feature-gates=IPv6DualStack=true \
228 | --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \
229 | --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \
230 | --node-cidr-mask-size-ipv4=24 \
231 | --node-cidr-mask-size-ipv6=120 \
232 | --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
233 |
234 | Restart=always
235 | RestartSec=10s
236 |
237 | [Install]
238 | WantedBy=multi-user.target
239 |
240 | ```
241 |
242 | 修改kubelet如下配置
243 | =============
244 |
245 | ```shell
246 | --feature-gates=IPv6DualStack=true
247 |
248 | [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kubelet.service
249 | [root@k8s-master01 ~]# cat /usr/lib/systemd/system/kubelet.service
250 | [Unit]
251 | Description=Kubernetes Kubelet
252 | Documentation=https://github.com/kubernetes/kubernetes
253 | After=docker.service
254 | Requires=docker.service
255 |
256 | [Service]
257 | ExecStart=/usr/local/bin/kubelet \
258 | --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
259 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
260 | --config=/etc/kubernetes/kubelet-conf.yml \
261 | --network-plugin=cni \
262 | --cni-conf-dir=/etc/cni/net.d \
263 | --cni-bin-dir=/opt/cni/bin \
264 | --container-runtime=remote \
265 | --runtime-request-timeout=15m \
266 | --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
267 | --cgroup-driver=systemd \
268 | --node-labels=node.kubernetes.io/node='' \
269 | --feature-gates=IPv6DualStack=true
270 |
271 | Restart=always
272 | StartLimitInterval=0
273 | RestartSec=10
274 |
275 | [Install]
276 | WantedBy=multi-user.target
277 |
278 | ```
279 |
280 | 修改kube-proxy如下配置
281 | ====================
282 |
283 | ```shell
284 | #修改如下配置
285 | clusterCIDR: 172.16.0.0/12,fc00:2222::/112
286 |
287 | [root@k8s-master01 ~]# vim /etc/kubernetes/kube-proxy.yaml
288 | [root@k8s-master01 ~]# cat /etc/kubernetes/kube-proxy.yaml
289 | apiVersion: kubeproxy.config.k8s.io/v1alpha1
290 | bindAddress: 0.0.0.0
291 | clientConnection:
292 | acceptContentTypes: ""
293 | burst: 10
294 | contentType: application/vnd.kubernetes.protobuf
295 | kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
296 | qps: 5
297 | clusterCIDR: 172.16.0.0/12,fc00:2222::/112
298 | configSyncPeriod: 15m0s
299 | conntrack:
300 | max: null
301 | maxPerCore: 32768
302 | min: 131072
303 | tcpCloseWaitTimeout: 1h0m0s
304 | tcpEstablishedTimeout: 24h0m0s
305 | enableProfiling: false
306 | healthzBindAddress: 0.0.0.0:10256
307 | hostnameOverride: ""
308 | iptables:
309 | masqueradeAll: false
310 | masqueradeBit: 14
311 | minSyncPeriod: 0s
312 | syncPeriod: 30s
313 | ipvs:
314 | masqueradeAll: true
315 | minSyncPeriod: 5s
316 | scheduler: "rr"
317 | syncPeriod: 30s
318 | kind: KubeProxyConfiguration
319 | metricsBindAddress: 127.0.0.1:10249
320 | mode: "ipvs"
321 | nodePortAddresses: null
322 | oomScoreAdj: -999
323 | portRange: ""
324 | udpIdleTimeout: 250ms
325 | [root@k8s-master01 ~]#
326 |
327 | ```
328 |
329 | 修改calico如下配置
330 | ============
331 |
332 | ```shell
333 | # vim calico.yaml
334 | # calico-config ConfigMap处
335 | "ipam": {
336 | "type": "calico-ipam",
337 | "assign_ipv4": "true",
338 | "assign_ipv6": "true"
339 | },
340 | - name: IP
341 | value: "autodetect"
342 |
343 | - name: IP6
344 | value: "autodetect"
345 |
346 | - name: CALICO_IPV4POOL_CIDR
347 | value: "172.16.0.0/12"
348 |
349 | - name: CALICO_IPV6POOL_CIDR
350 | value: "fc00::/48"
351 |
352 | - name: FELIX_IPV6SUPPORT
353 | value: "true"
354 | # kubectl apply -f calico.yaml
355 |
356 | ```
357 |
358 | 测试
359 | ==
360 |
361 | ```shell
362 | #部署应用
363 | [root@k8s-master01 ~]# cat cby.yaml
364 | apiVersion: apps/v1
365 | kind: Deployment
366 | metadata:
367 | name: chenby
368 | spec:
369 | replicas: 3
370 | selector:
371 | matchLabels:
372 | app: chenby
373 | template:
374 | metadata:
375 | labels:
376 | app: chenby
377 | spec:
378 | containers:
379 | - name: chenby
380 | image: nginx
381 | resources:
382 | limits:
383 | memory: "128Mi"
384 | cpu: "500m"
385 | ports:
386 | - containerPort: 80
387 |
388 | ---
389 | apiVersion: v1
390 | kind: Service
391 | metadata:
392 | name: chenby
393 | spec:
394 | ipFamilyPolicy: PreferDualStack
395 | ipFamilies:
396 | - IPv6
397 | - IPv4
398 | type: NodePort
399 | selector:
400 | app: chenby
401 | ports:
402 | - port: 80
403 | targetPort: 80
404 | [root@k8s-master01 ~]# kubectl apply -f cby.yaml
405 |
406 | #查看端口
407 | [root@k8s-master01 ~]# kubectl get svc
408 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
409 | chenby NodePort fd00::d80a 80:31535/TCP 54s
410 | kubernetes ClusterIP 10.96.0.1 443/TCP 22h
411 | [root@k8s-master01 ~]#
412 |
413 | #使用内网访问
414 | [root@k8s-master01 ~]# curl -I http://[fd00::d80a]
415 | HTTP/1.1 200 OK
416 | Server: nginx/1.21.6
417 | Date: Fri, 29 Apr 2022 07:29:28 GMT
418 | Content-Type: text/html
419 | Content-Length: 615
420 | Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
421 | Connection: keep-alive
422 | ETag: "61f01158-267"
423 | Accept-Ranges: bytes
424 |
425 | [root@k8s-master01 ~]#
426 |
427 | #使用公网访问
428 | [root@k8s-master01 ~]# curl -I http://[2408:8207:78ce:7561::10]:31535
429 | HTTP/1.1 200 OK
430 | Server: nginx/1.21.6
431 | Date: Fri, 29 Apr 2022 07:25:16 GMT
432 | Content-Type: text/html
433 | Content-Length: 615
434 | Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
435 | Connection: keep-alive
436 | ETag: "61f01158-267"
437 | Accept-Ranges: bytes
438 |
439 | [root@k8s-master01 ~]#
440 |
441 | [root@k8s-master01 ~]# curl -I http://10.0.0.81:31535
442 | HTTP/1.1 200 OK
443 | Server: nginx/1.21.6
444 | Date: Fri, 29 Apr 2022 07:26:16 GMT
445 | Content-Type: text/html
446 | Content-Length: 615
447 | Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
448 | Connection: keep-alive
449 | ETag: "61f01158-267"
450 | Accept-Ranges: bytes
451 |
452 | [root@k8s-master01 ~]#
453 |
454 | ```
455 |
456 |
457 |
458 |
459 |
460 | 
461 |
462 |
463 |
464 |
465 |
466 | > **关于**
467 | >
468 | > https://www.oiox.cn/
469 | >
470 | > https://www.oiox.cn/index.php/start-page.html
471 | >
472 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
473 | >
474 | > **全网可搜《小陈运维》**
475 | >
476 | > **文章主要发布于微信公众号:《Linux运维交流社区》**
477 |
--------------------------------------------------------------------------------
/doc/Kubernetes_docker.md:
--------------------------------------------------------------------------------
1 | ## Kubernetes 1.24 1.25 集群使用docker作为容器运行时
2 |
3 | ### 背景
4 |
5 | 在新版本Kubernetes环境(1.24以及以上版本)下官方不再支持docker作为容器运行时了,若要继续使用docker 需要对docker进行配置一番。需要安装cri-docker作为Kubernetes容器运行时
6 |
7 |
8 |
9 | ### 查看当前容器运行时
10 |
11 | ```shell
12 | # 查看指定节点容器运行时
13 | kubectl describe node k8s-node05 | grep Container
14 | Container Runtime Version: containerd://1.6.8
15 |
16 | # 查看所有节点容器运行时
17 | kubectl describe node | grep Container
18 | Container Runtime Version: containerd://1.6.8
19 | Container Runtime Version: containerd://1.6.8
20 | Container Runtime Version: containerd://1.6.8
21 | Container Runtime Version: containerd://1.6.8
22 | Container Runtime Version: containerd://1.6.8
23 | Container Runtime Version: containerd://1.6.8
24 | Container Runtime Version: containerd://1.6.8
25 | Container Runtime Version: containerd://1.6.8
26 | ```
27 |
28 |
29 |
30 | ### 安装docker
31 |
32 | ```shell
33 | # 更新源信息
34 | yum update
35 | # 安装必要软件
36 | yum install -y yum-utils device-mapper-persistent-data lvm2
37 |
38 | # 写入docker源信息
39 | sudo yum-config-manager \
40 | --add-repo \
41 | https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo
42 |
43 | # 更新源信息并进行安装
44 | yum update
45 | yum install docker-ce docker-ce-cli containerd.io
46 |
47 |
48 | # 配置加速器
49 | sudo mkdir -p /etc/docker
50 | sudo tee /etc/docker/daemon.json <<-'EOF'
51 | {
52 | "registry-mirrors": ["https://hub-mirror.c.163.com"],
53 | "exec-opts": ["native.cgroupdriver=systemd"]
54 | }
55 | EOF
56 | sudo systemctl daemon-reload
57 | sudo systemctl restart docker
58 | ```
59 |
60 |
61 |
62 | ### 安装cri-docker
63 |
64 | ```shell
65 | # 由于1.24以及更高版本不支持docker所以安装cri-docker
66 | # 下载cri-docker
67 | wget https://mirrors.chenby.cn/https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5.amd64.tgz
68 |
69 | # 解压cri-docker
70 | tar xvf cri-dockerd-0.2.5.amd64.tgz
71 | cp cri-dockerd/cri-dockerd /usr/bin/
72 |
73 | # 写入启动配置文件
74 | cat > /usr/lib/systemd/system/cri-docker.service << EOF
75 | [Unit]
76 | Description=CRI Interface for Docker Application Container Engine
77 | Documentation=https://docs.mirantis.com
78 | After=network-online.target firewalld.service docker.service
79 | Wants=network-online.target
80 | Requires=cri-docker.socket
81 |
82 | [Service]
83 | Type=notify
84 | ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
85 | ExecReload=/bin/kill -s HUP \$MAINPID
86 | TimeoutSec=0
87 | RestartSec=2
88 | Restart=always
89 | StartLimitBurst=3
90 | StartLimitInterval=60s
91 | LimitNOFILE=infinity
92 | LimitNPROC=infinity
93 | LimitCORE=infinity
94 | TasksMax=infinity
95 | Delegate=yes
96 | KillMode=process
97 |
98 | [Install]
99 | WantedBy=multi-user.target
100 | EOF
101 |
102 | # 写入socket配置文件
103 | cat > /usr/lib/systemd/system/cri-docker.socket << EOF
104 | [Unit]
105 | Description=CRI Docker Socket for the API
106 | PartOf=cri-docker.service
107 |
108 | [Socket]
109 | ListenStream=%t/cri-dockerd.sock
110 | SocketMode=0660
111 | SocketUser=root
112 | SocketGroup=docker
113 |
114 | [Install]
115 | WantedBy=sockets.target
116 | EOF
117 |
118 | # 启动cri-docker
119 | systemctl daemon-reload
120 | systemctl enable --now cri-docker.service
121 | systemctl enable --now cri-docker.socket
122 | ```
123 |
124 |
125 |
126 | ### 配置kubelet
127 |
128 | ```shell
129 | # 由于1.24版本和1.25版本的kubelet参数不同,请按自己的版本选择其一
130 |
131 | # 1.25 版本下 所有k8s节点配置kubelet service
132 | cat > /usr/lib/systemd/system/kubelet.service << EOF
133 |
134 | [Unit]
135 | Description=Kubernetes Kubelet
136 | Documentation=https://github.com/kubernetes/kubernetes
137 | After=containerd.service
138 | Requires=containerd.service
139 |
140 | [Service]
141 | ExecStart=/usr/local/bin/kubelet \\
142 | --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
143 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
144 | --config=/etc/kubernetes/kubelet-conf.yml \\
145 | --container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
146 | --node-labels=node.kubernetes.io/node=
147 |
148 | [Install]
149 | WantedBy=multi-user.target
150 | EOF
151 |
152 |
153 | # 1.24 版本下 所有k8s节点配置kubelet service
154 | cat > /usr/lib/systemd/system/kubelet.service << EOF
155 |
156 | [Unit]
157 | Description=Kubernetes Kubelet
158 | Documentation=https://github.com/kubernetes/kubernetes
159 | After=containerd.service
160 | Requires=containerd.service
161 |
162 | [Service]
163 | ExecStart=/usr/local/bin/kubelet \\
164 | --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
165 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
166 | --config=/etc/kubernetes/kubelet-conf.yml \\
167 | --container-runtime=remote \\
168 | --runtime-request-timeout=15m \\
169 | --container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
170 | --cgroup-driver=systemd \\
171 | --node-labels=node.kubernetes.io/node= \\
172 | --feature-gates=IPv6DualStack=true
173 |
174 | [Install]
175 | WantedBy=multi-user.target
176 | EOF
177 |
178 |
179 |
180 | # 重启
181 | systemctl daemon-reload
182 | systemctl restart kubelet
183 | systemctl enable --now kubelet
184 | ```
185 |
186 |
187 |
188 | ### 验证
189 |
190 | ```shell
191 | # 查看指定节点容器运行时
192 | kubectl describe node k8s-node05 | grep Container
193 | Container Runtime Version: docker://20.10.17
194 |
195 | # 查看所有节点容器运行时
196 | kubectl describe node | grep Container
197 | Container Runtime Version: containerd://1.6.8
198 | Container Runtime Version: containerd://1.6.8
199 | Container Runtime Version: containerd://1.6.8
200 | Container Runtime Version: containerd://1.6.8
201 | Container Runtime Version: containerd://1.6.8
202 | Container Runtime Version: containerd://1.6.8
203 | Container Runtime Version: containerd://1.6.8
204 | Container Runtime Version: docker://20.10.17
205 | ```
206 |
207 |
208 |
209 |
210 | > **关于**
211 | >
212 | > https://www.oiox.cn/
213 | >
214 | > https://www.oiox.cn/index.php/start-page.html
215 | >
216 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
217 | >
218 | > **全网可搜《小陈运维》**
219 | >
220 | > **文章主要发布于微信公众号**
--------------------------------------------------------------------------------
/doc/Minikube_init.md:
--------------------------------------------------------------------------------
1 | ## 安装Minikube并启动一个Kubernetes环境
2 | Minikube 是一种轻量级的Kubernetes 实现,可在本地计算机上创建VM 并部署仅包含一个节点的简单集群。Minikube 可用于Linux , macOS 和Windows 系统。Minikube CLI 提供了用于引导集群工作的多种操作,包括启动、停止、查看状态和删除。
3 |
4 | ### 安装docker
5 |
6 | ```shell
7 | # 更新源信息
8 | sudo apt-get update
9 |
10 | # 安装必要软件
11 | sudo apt-get install ca-certificates curl gnupg lsb-release
12 |
13 | # 创建key
14 | sudo mkdir -p /etc/apt/keyrings
15 |
16 | # 导入key证书
17 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
18 |
19 | # 写入docker源信息
20 | echo \
21 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
22 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
23 |
24 | # 设置为国内源
25 | sed -i s#download.docker.com#mirrors.ustc.edu.cn/docker-ce#g /etc/apt/sources.list.d/docker.list
26 |
27 | # 更新源信息并进行安装
28 | sudo apt-get update
29 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
30 |
31 | # 配置加速器
32 | sudo mkdir -p /etc/docker
33 | sudo tee /etc/docker/daemon.json <<-'EOF'
34 | {
35 | "registry-mirrors": ["https://hub-mirror.c.163.com"],
36 | "exec-opts": ["native.cgroupdriver=systemd"]
37 | }
38 | EOF
39 | sudo systemctl daemon-reload
40 | sudo systemctl restart docker
41 | ```
42 |
43 | ### 安装cri-docker
44 | ```shell
45 | # 由于1.24以及更高版本不支持docker所以安装cri-docker
46 | # 下载cri-docker
47 | wget https://mirrors.chenby.cn/https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5.amd64.tgz
48 |
49 | # 解压cri-docker
50 | tar xvf cri-dockerd-0.2.5.amd64.tgz
51 | cp cri-dockerd/cri-dockerd /usr/bin/
52 |
53 | # 写入启动配置文件
54 | cat > /usr/lib/systemd/system/cri-docker.service < /usr/lib/systemd/system/cri-docker.socket < registry.cn-hangzhou.aliyun...: 386.60 MiB / 386.61 MiB 100.00% 1.37 Mi
146 | > registry.cn-hangzhou.aliyun...: 0 B [____________________] ?% ? p/s 4m9s
147 | * Creating docker container (CPUs=2, Memory=2200MB) ...
148 | * Preparing Kubernetes v1.24.3 on containerd 1.6.6 ...
149 | > kubelet.sha256: 64 B / 64 B [-------------------------] 100.00% ? p/s 0s
150 | > kubectl.sha256: 64 B / 64 B [-------------------------] 100.00% ? p/s 0s
151 | > kubeadm.sha256: 64 B / 64 B [-------------------------] 100.00% ? p/s 0s
152 | > kubeadm: 42.32 MiB / 42.32 MiB [--------------] 100.00% 1.36 MiB p/s 31s
153 | > kubectl: 43.59 MiB / 43.59 MiB [--------------] 100.00% 1.02 MiB p/s 43s
154 | > kubelet: 110.64 MiB / 110.64 MiB [----------] 100.00% 1.36 MiB p/s 1m22s
155 | - Generating certificates and keys ...
156 | - Booting up control plane ...
157 | - Configuring RBAC rules ...
158 | * Configuring CNI (Container Networking Interface) ...
159 | * Verifying Kubernetes components...
160 | - Using image registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner:v5
161 | * Enabled addons: storage-provisioner, default-storageclass
162 | * Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default
163 | root@cby:~#
164 | ```
165 |
166 | ### 验证
167 | ```shell
168 | root@cby:~# kubectl get node
169 | NAME STATUS ROLES AGE VERSION
170 | minikube Ready control-plane 43s v1.24.3
171 | root@cby:~#
172 | root@cby:~# kubectl get pod -A
173 | NAMESPACE NAME READY STATUS RESTARTS AGE
174 | kube-system coredns-7f74c56694-znvr4 1/1 Running 0 31s
175 | kube-system etcd-minikube 1/1 Running 0 43s
176 | kube-system kindnet-nt8nf 1/1 Running 0 31s
177 | kube-system kube-apiserver-minikube 1/1 Running 0 43s
178 | kube-system kube-controller-manager-minikube 1/1 Running 0 43s
179 | kube-system kube-proxy-ztq87 1/1 Running 0 31s
180 | kube-system kube-scheduler-minikube 1/1 Running 0 43s
181 | kube-system storage-provisioner 1/1 Running 0 41s
182 | root@cby:~#
183 | ```
184 |
185 | ### 附录
186 |
187 | ```
188 | # 若出现错误可以做如下操作
189 | minikube delete
190 | rm -rf .minikube/
191 | ```
192 |
193 |
194 |
195 |
196 | > **关于**
197 | >
198 | > https://www.oiox.cn/
199 | >
200 | > https://www.oiox.cn/index.php/start-page.html
201 | >
202 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
203 | >
204 | > **全网可搜《小陈运维》**
205 | >
206 | > **文章主要发布于微信公众号**
207 |
--------------------------------------------------------------------------------
/doc/Upgrade_Kubernetes.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## 升级二进制kubernetes集群
4 |
5 |
6 |
7 | ### 背景介绍
8 |
9 | 最近由于时间不足,暂时无法对小版本更新第一时间出新的文档。若需要升级集群版本,可以参考此文档进行操作,每个节点一个一个的更新。大版本更新请各位持续关注我的Github项目仓库。后续更新会在仓库持续更新。感谢各位小伙伴一直以来的支持。
10 |
11 | 此文档基于我的二进制安装仓库 https://github.com/cby-chen/Kubernetes
12 |
13 |
14 |
15 | ### 基础操作
16 |
17 | #### 查看当前版本信息
18 |
19 | ```shell
20 | [root@k8s-master01 ~]# kubectl get node
21 | NAME STATUS ROLES AGE VERSION
22 | k8s-master01 Ready 57d v1.23.6
23 | k8s-master02 Ready 57d v1.23.6
24 | k8s-master03 Ready 57d v1.23.6
25 | k8s-node01 Ready 57d v1.23.6
26 | k8s-node02 Ready 57d v1.23.6
27 | [root@k8s-master01 ~]#
28 | ```
29 |
30 |
31 |
32 | #### 主机域名以及IP地址
33 |
34 | ```shell
35 | [root@k8s-master01 ~]# cat /etc/hosts | grep k8s
36 | 192.168.1.230 k8s-master01
37 | 192.168.1.231 k8s-master02
38 | 192.168.1.232 k8s-master03
39 | 192.168.1.233 k8s-node01
40 | 192.168.1.234 k8s-node02
41 | [root@k8s-master01 ~]#
42 | ```
43 |
44 |
45 |
46 | #### 下载二进制安装包
47 |
48 | ```shell
49 | [root@k8s-master01 ~]# wget https://dl.k8s.io/v1.23.9/kubernetes-server-linux-amd64.tar.gz
50 | [root@k8s-master01 ~]#
51 | ```
52 |
53 |
54 |
55 | #### 解压二进制安装包
56 |
57 | ```shell
58 | [root@k8s-master01 ~]# tar xf kubernetes-server-linux-amd64.tar.gz
59 | [root@k8s-master01 ~]#
60 | ```
61 |
62 |
63 |
64 | ### 升级Master
65 |
66 | #### 升级三台主节点上的客户端
67 |
68 | ```shell
69 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kubectl root@192.168.1.230:/usr/local/bin/
70 | [root@k8s-master01 ~]#
71 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kubectl root@192.168.1.231:/usr/local/bin/
72 | [root@k8s-master01 ~]#
73 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kubectl root@192.168.1.232:/usr/local/bin/
74 | [root@k8s-master01 ~]#
75 | ```
76 |
77 |
78 |
79 | #### 升级三台主节点api组件
80 |
81 | ```shell
82 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl stop kube-apiserver"
83 | [root@k8s-master01 ~]#
84 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kube-apiserver root@192.168.1.230:/usr/local/bin/
85 | [root@k8s-master01 ~]#
86 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl start kube-apiserver"
87 | [root@k8s-master01 ~]#
88 | [root@k8s-master01 ~]# kube-apiserver --version
89 | Kubernetes v1.23.9
90 | [root@k8s-master01 ~]#
91 | ```
92 |
93 |
94 |
95 | #### 升级三台主节点控制器组件
96 |
97 | ```shell
98 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl stop kube-controller-manager"
99 | [root@k8s-master01 ~]#
100 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kube-controller-manager root@192.168.1.230:/usr/local/bin/
101 | [root@k8s-master01 ~]#
102 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl start kube-controller-manager"
103 | [root@k8s-master01 ~]#
104 | ```
105 |
106 |
107 |
108 | #### 升级三台主节点选择器组件
109 |
110 | ```shell
111 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl stop kube-scheduler"
112 | [root@k8s-master01 ~]#
113 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kube-scheduler root@192.168.1.230:/usr/local/bin/
114 | [root@k8s-master01 ~]#
115 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl start kube-scheduler"
116 | [root@k8s-master01 ~]#
117 | ```
118 |
119 |
120 |
121 | ### 升级Worker
122 |
123 | #### 每一台机器都要升级kubelet
124 |
125 | ```shell
126 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl stop kubelet"
127 | [root@k8s-master01 ~]#
128 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kubelet root@192.168.1.230:/usr/local/bin/
129 | [root@k8s-master01 ~]#
130 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl start kubelet"
131 | [root@k8s-master01 ~]#
132 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "kubelet --version"
133 | Kubernetes v1.23.9
134 | [root@k8s-master01 ~]#
135 | ```
136 |
137 |
138 |
139 | #### 每一台机器都要升级kube-proxy
140 |
141 | ```shell
142 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl stop kube-proxy"
143 | [root@k8s-master01 ~]#
144 | [root@k8s-master01 ~]# scp kubernetes/server/bin/kube-proxy root@192.168.1.230:/usr/local/bin/
145 | [root@k8s-master01 ~]#
146 | [root@k8s-master01 ~]# ssh root@192.168.1.230 "systemctl start kube-proxy"
147 | [root@k8s-master01 ~]#
148 | ```
149 |
150 |
151 |
152 | ### 验证
153 |
154 | ```shell
155 | [root@k8s-master01 ~]# kubectl get node
156 | NAME STATUS ROLES AGE VERSION
157 | k8s-master01 Ready 57d v1.23.9
158 | k8s-master02 Ready 57d v1.23.9
159 | k8s-master03 Ready 57d v1.23.9
160 | k8s-node01 Ready 57d v1.23.9
161 | k8s-node02 Ready 57d v1.23.9
162 | [root@k8s-master01 ~]#
163 | [root@k8s-master01 ~]# kubectl version
164 | Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.9", GitCommit:"c1de2d70269039fe55efb98e737d9a29f9155246", GitTreeState:"clean", BuildDate:"2022-07-13T14:26:51Z", GoVersion:"go1.17.11", Compiler:"gc", Platform:"linux/amd64"}
165 | Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.9", GitCommit:"c1de2d70269039fe55efb98e737d9a29f9155246", GitTreeState:"clean", BuildDate:"2022-07-13T14:19:57Z", GoVersion:"go1.17.11", Compiler:"gc", Platform:"linux/amd64"}
166 | [root@k8s-master01 ~]#
167 | ```
168 |
169 |
170 |
171 | > **关于**
172 | >
173 | > https://www.oiox.cn/
174 | >
175 | > https://www.oiox.cn/index.php/start-page.html
176 | >
177 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
178 | >
179 | > **全网可搜《小陈运维》**
180 | >
181 | > **文章主要发布于微信公众号**
182 |
--------------------------------------------------------------------------------
/doc/kube-proxy_permissions.md:
--------------------------------------------------------------------------------
1 | # 修复kube-proxy证书权限过大问题
2 |
3 |
4 |
5 | 之前kube-proxy服务使用的都是admin集群证书,权限过大不安全,该问题后续将在文档中修复
6 |
7 | 请关注 https://github.com/cby-chen/Kubernetes
8 |
9 |
10 |
11 | ## 创建生成证书配置文件
12 |
13 | ```shell
14 | 详细见:https://github.com/cby-chen/Kubernetes#23%E5%88%9B%E5%BB%BA%E8%AF%81%E4%B9%A6%E7%9B%B8%E5%85%B3%E6%96%87%E4%BB%B6
15 |
16 | cat > ca-config.json << EOF
17 | {
18 | "signing": {
19 | "default": {
20 | "expiry": "876000h"
21 | },
22 | "profiles": {
23 | "kubernetes": {
24 | "usages": [
25 | "signing",
26 | "key encipherment",
27 | "server auth",
28 | "client auth"
29 | ],
30 | "expiry": "876000h"
31 | }
32 | }
33 | }
34 | }
35 | EOF
36 |
37 | cat > kube-proxy-csr.json << EOF
38 | {
39 | "CN": "system:kube-proxy",
40 | "key": {
41 | "algo": "rsa",
42 | "size": 2048
43 | },
44 | "names": [
45 | {
46 | "C": "CN",
47 | "ST": "Beijing",
48 | "L": "Beijing",
49 | "O": "system:kube-proxy",
50 | "OU": "Kubernetes-manual"
51 | }
52 | ]
53 | }
54 | EOF
55 | ```
56 |
57 |
58 |
59 | ## 生成 CA 证书和私钥
60 |
61 | ```shell
62 | cfssl gencert \
63 | -ca=/etc/kubernetes/pki/ca.pem \
64 | -ca-key=/etc/kubernetes/pki/ca-key.pem \
65 | -config=ca-config.json \
66 | -profile=kubernetes \
67 | kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
68 |
69 |
70 |
71 |
72 | ll /etc/kubernetes/pki/kube-proxy*
73 | -rw-r--r-- 1 root root 1045 May 26 10:21 /etc/kubernetes/pki/kube-proxy.csr
74 | -rw------- 1 root root 1675 May 26 10:21 /etc/kubernetes/pki/kube-proxy-key.pem
75 | -rw-r--r-- 1 root root 1464 May 26 10:21 /etc/kubernetes/pki/kube-proxy.pem
76 | ```
77 |
78 | 设置集群参数和客户端认证参数时 --embed-certs 都为 true,这会将 certificate-authority、client-certificate 和 client-key 指向的证书文件内容写入到生成的 kube-proxy.kubeconfig 文件中;
79 |
80 | kube-proxy.pem 证书中 CN 为 system:kube-proxy,kube-apiserver 预定义的 ClusterRoleBinding system:node-proxier 将 User system:kube-proxy 与 ClusterRole system:node-proxier 绑定,该 Role 授予了 kube-proxy 组件访问所需 kube-apiserver API 的权限;
81 |
82 |
83 |
84 | ## 创建 kubeconfig 文件
85 |
86 | ```shell
87 | kubectl config set-cluster kubernetes \
88 | --certificate-authority=/etc/kubernetes/pki/ca.pem \
89 | --embed-certs=true \
90 | --server=https://10.0.0.89:8443 \
91 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
92 |
93 | kubectl config set-credentials kube-proxy \
94 | --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
95 | --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
96 | --embed-certs=true \
97 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
98 |
99 | kubectl config set-context kube-proxy@kubernetes \
100 | --cluster=kubernetes \
101 | --user=kube-proxy \
102 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
103 |
104 | kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
105 | ```
106 |
107 |
108 |
109 | ## 无法访问 pod资源
110 |
111 | ```shell
112 | [cby@k8s-master01 ~]$ kubectl get pod
113 | Error from server (Forbidden): pods is forbidden: User "system:kube-proxy" cannot list resource "pods" in API group "" in the namespace "default"
114 | [cby@k8s-master01 ~]$
115 | ```
116 |
117 |
118 |
119 | ## 可以访问 node资源
120 |
121 | ```shell
122 | [cby@k8s-master01 ~]$ kubectl get node
123 | NAME STATUS ROLES AGE VERSION
124 | k8s-master01 Ready 2d21h v1.24.0
125 | k8s-master02 Ready 2d21h v1.24.0
126 | k8s-master03 Ready 2d21h v1.24.0
127 | k8s-node01 Ready 2d21h v1.24.0
128 | k8s-node02 Ready 2d21h v1.24.0
129 | [cby@k8s-master01 ~]$
130 |
131 | ```
132 |
133 |
134 |
135 | ## 将配置进行替换
136 |
137 | ```shell
138 | for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
139 |
140 | for NODE in k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
141 |
142 | [root@k8s-master01 ~]# cat /etc/kubernetes/kube-proxy.yaml
143 | apiVersion: kubeproxy.config.k8s.io/v1alpha1
144 | bindAddress: 0.0.0.0
145 | clientConnection:
146 | acceptContentTypes: ""
147 | burst: 10
148 | contentType: application/vnd.kubernetes.protobuf
149 | kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
150 | qps: 5
151 | clusterCIDR: 172.16.0.0/12,fc00:2222::/112
152 | configSyncPeriod: 15m0s
153 | conntrack:
154 | max: null
155 | maxPerCore: 32768
156 | min: 131072
157 | tcpCloseWaitTimeout: 1h0m0s
158 | tcpEstablishedTimeout: 24h0m0s
159 | enableProfiling: false
160 | healthzBindAddress: 0.0.0.0:10256
161 | hostnameOverride: ""
162 | iptables:
163 | masqueradeAll: false
164 | masqueradeBit: 14
165 | minSyncPeriod: 0s
166 | syncPeriod: 30s
167 | ipvs:
168 | masqueradeAll: true
169 | minSyncPeriod: 5s
170 | scheduler: "rr"
171 | syncPeriod: 30s
172 | kind: KubeProxyConfiguration
173 | metricsBindAddress: 127.0.0.1:10249
174 | mode: "ipvs"
175 | nodePortAddresses: null
176 | oomScoreAdj: -999
177 | portRange: ""
178 | udpIdleTimeout: 250ms
179 |
180 | [root@k8s-master01 ~]# systemctl restart kube-proxy
181 | ```
182 |
183 |
184 |
185 |
186 |
187 | > **关于**
188 | >
189 | > https://www.oiox.cn/
190 | >
191 | > https://www.oiox.cn/index.php/start-page.html
192 | >
193 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
194 | >
195 | > **全网可搜《小陈运维》**
196 | >
197 | > **文章主要发布于微信公众号:《Linux运维交流社区》**
198 |
--------------------------------------------------------------------------------
/doc/kubeadm-install-IPV6-IPV4.md:
--------------------------------------------------------------------------------
1 | 使用kubeadm初始化IPV4/IPV6集群
2 | =======================
3 |
4 | CentOS 配置YUM源
5 | =============
6 |
7 | ```shell
8 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo
9 | [kubernetes]
10 | name=kubernetes
11 | baseurl=https://mirrors.ustc.edu.cn/kubernetes/yum/repos/kubernetes-el7-$basearch
12 | enabled=1
13 | EOF
14 | setenforce 0
15 | yum install -y kubelet kubeadm kubectl
16 |
17 | # 如安装老版本
18 | # yum install kubelet-1.16.9-0 kubeadm-1.16.9-0 kubectl-1.16.9-0
19 |
20 | systemctl enable kubelet && systemctl start kubelet
21 |
22 |
23 | # 将 SELinux 设置为 permissive 模式(相当于将其禁用)
24 | sudo setenforce 0
25 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
26 | sudo systemctl enable --now kubelet
27 |
28 | ```
29 |
30 | Ubuntu 配置APT源
31 | =============
32 |
33 | ```shell
34 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
35 | cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
36 | deb https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main
37 | EOF
38 | apt-get update
39 | apt-get install -y kubelet kubeadm kubectl
40 |
41 | # 如安装老版本
42 | # apt install kubelet=1.23.6-00 kubeadm=1.23.6-00 kubectl=1.23.6-00
43 |
44 | ```
45 |
46 | 配置containerd
47 | ============
48 |
49 | ```shell
50 | wget https://github.com/containerd/containerd/releases/download/v1.6.4/cri-containerd-cni-1.6.4-linux-amd64.tar.gz
51 |
52 | #解压
53 | tar -C / -xzf cri-containerd-cni-1.6.4-linux-amd64.tar.gz
54 |
55 | #创建服务启动文件
56 | cat > /etc/systemd/system/containerd.service < /etc/hosts < --discovery-token-ca-cert-hash sha256:2ade8c834a41cc1960993a600c89fa4bb86e3594f82e09bcd42633d4defbda0d
284 | [preflight] Running pre-flight checks
285 | [preflight] Reading configuration from the cluster...
286 | [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
287 | [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
288 | [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
289 | [kubelet-start] Starting the kubelet
290 | [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
291 |
292 | This node has joined the cluster:
293 | * Certificate signing request was sent to apiserver and a response was received.
294 | * The Kubelet was informed of the new secure connection details.
295 |
296 | Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
297 |
298 | root@k8s-node01:~#
299 |
300 |
301 | root@k8s-node02:~# kubeadm join 10.0.0.21:6443 --token qf3z22.qwtqieutbkik6dy4 \
302 | > --discovery-token-ca-cert-hash sha256:2ade8c834a41cc1960993a600c89fa4bb86e3594f82e09bcd42633d4defbda0d
303 | [preflight] Running pre-flight checks
304 | [preflight] Reading configuration from the cluster...
305 | [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
306 | [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
307 | [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
308 | [kubelet-start] Starting the kubelet
309 | [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
310 |
311 | This node has joined the cluster:
312 | * Certificate signing request was sent to apiserver and a response was received.
313 | * The Kubelet was informed of the new secure connection details.
314 |
315 | Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
316 |
317 | root@k8s-node02:~#
318 |
319 | ```
320 |
321 | 查看集群
322 | ====
323 |
324 | ```shell
325 | root@k8s-master01:~# kubectl get node
326 | NAME STATUS ROLES AGE VERSION
327 | k8s-master01 Ready control-plane 111s v1.24.0
328 | k8s-node01 Ready 82s v1.24.0
329 | k8s-node02 Ready 92s v1.24.0
330 | root@k8s-master01:~#
331 | root@k8s-master01:~#
332 | root@k8s-master01:~# kubectl get pod -A
333 | NAMESPACE NAME READY STATUS RESTARTS AGE
334 | kube-system coredns-bc77466fc-jxkpv 1/1 Running 0 83s
335 | kube-system coredns-bc77466fc-nrc9l 1/1 Running 0 83s
336 | kube-system etcd-k8s-master01 1/1 Running 0 87s
337 | kube-system kube-apiserver-k8s-master01 1/1 Running 0 89s
338 | kube-system kube-controller-manager-k8s-master01 1/1 Running 0 87s
339 | kube-system kube-proxy-2lgrn 1/1 Running 0 83s
340 | kube-system kube-proxy-69p9r 1/1 Running 0 47s
341 | kube-system kube-proxy-g58m2 1/1 Running 0 42s
342 | kube-system kube-scheduler-k8s-master01 1/1 Running 0 87s
343 | root@k8s-master01:~#
344 |
345 | ```
346 |
347 | 配置calico
348 | ========
349 |
350 | ```shell
351 | wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/calico-ipv6.yaml
352 |
353 | # vim calico-ipv6.yaml
354 | # calico-config ConfigMap处
355 | "ipam": {
356 | "type": "calico-ipam",
357 | "assign_ipv4": "true",
358 | "assign_ipv6": "true"
359 | },
360 | - name: IP
361 | value: "autodetect"
362 |
363 | - name: IP6
364 | value: "autodetect"
365 |
366 | - name: CALICO_IPV4POOL_CIDR
367 | value: "172.16.0.0/12"
368 |
369 | - name: CALICO_IPV6POOL_CIDR
370 | value: "fc00::/48"
371 |
372 | - name: FELIX_IPV6SUPPORT
373 | value: "true"
374 |
375 | kubectl apply -f calico-ipv6.yaml
376 |
377 | ```
378 |
379 | 测试IPV6
380 | ======
381 |
382 | ```shell
383 | root@k8s-master01:~# cat cby.yaml
384 | apiVersion: apps/v1
385 | kind: Deployment
386 | metadata:
387 | name: chenby
388 | spec:
389 | replicas: 3
390 | selector:
391 | matchLabels:
392 | app: chenby
393 | template:
394 | metadata:
395 | labels:
396 | app: chenby
397 | spec:
398 | containers:
399 | - name: chenby
400 | image: nginx
401 | resources:
402 | limits:
403 | memory: "128Mi"
404 | cpu: "500m"
405 | ports:
406 | - containerPort: 80
407 |
408 | ---
409 | apiVersion: v1
410 | kind: Service
411 | metadata:
412 | name: chenby
413 | spec:
414 | ipFamilyPolicy: PreferDualStack
415 | ipFamilies:
416 | - IPv6
417 | - IPv4
418 | type: NodePort
419 | selector:
420 | app: chenby
421 | ports:
422 | - port: 80
423 | targetPort: 80
424 |
425 | kubectl apply -f cby.yaml
426 |
427 | root@k8s-master01:~# kubectl get pod
428 | NAME READY STATUS RESTARTS AGE
429 | chenby-57479d5997-6pfzg 1/1 Running 0 6m
430 | chenby-57479d5997-jjwpk 1/1 Running 0 6m
431 | chenby-57479d5997-pzrkc 1/1 Running 0 6m
432 |
433 | root@k8s-master01:~# kubectl get svc
434 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
435 | chenby NodePort fd00::f816 80:30265/TCP 6m7s
436 | kubernetes ClusterIP 10.96.0.1 443/TCP 168m
437 |
438 | root@k8s-master01:~# curl -I http://[2408:8207:78ce:7561::21]:30265/
439 | HTTP/1.1 200 OK
440 | Server: nginx/1.21.6
441 | Date: Wed, 11 May 2022 07:01:43 GMT
442 | Content-Type: text/html
443 | Content-Length: 615
444 | Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
445 | Connection: keep-alive
446 | ETag: "61f01158-267"
447 | Accept-Ranges: bytes
448 |
449 | root@k8s-master01:~# curl -I http://10.0.0.21:30265/
450 | HTTP/1.1 200 OK
451 | Server: nginx/1.21.6
452 | Date: Wed, 11 May 2022 07:01:54 GMT
453 | Content-Type: text/html
454 | Content-Length: 615
455 | Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
456 | Connection: keep-alive
457 | ETag: "61f01158-267"
458 | Accept-Ranges: bytes
459 |
460 | ```
461 |
462 | > **关于**
463 | >
464 | > https://www.oiox.cn/
465 | >
466 | > https://www.oiox.cn/index.php/start-page.html
467 | >
468 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
469 | >
470 | > **全网可搜《小陈运维》**
471 | >
472 | > **文章主要发布于微信公众号:《Linux运维交流社区》**
473 |
474 |
--------------------------------------------------------------------------------
/doc/kubernetes_install_cilium.md:
--------------------------------------------------------------------------------
1 | ## kubernetes 安装cilium
2 |
3 | ### Cilium介绍
4 | Cilium是一个开源软件,用于透明地提供和保护使用Kubernetes,Docker和Mesos等Linux容器管理平台部署的应用程序服务之间的网络和API连接。
5 |
6 | Cilium基于一种名为BPF的新Linux内核技术,它可以在Linux内部动态插入强大的安全性,可见性和网络控制逻辑。 除了提供传统的网络级安全性之外,BPF的灵活性还可以在API和进程级别上实现安全性,以保护容器或容器内的通信。由于BPF在Linux内核中运行,因此可以应用和更新Cilium安全策略,而无需对应用程序代码或容器配置进行任何更改。
7 |
8 |
9 | ### 1 安装helm
10 |
11 | ```shell
12 | [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
13 | [root@k8s-master01 ~]# chmod 700 get_helm.sh
14 | [root@k8s-master01 ~]# ./get_helm.sh
15 | ```
16 |
17 | ### 2 安装cilium
18 |
19 | ```shell
20 | [root@k8s-master01 ~]# helm repo add cilium https://helm.cilium.io
21 | [root@k8s-master01 ~]# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}"
22 |
23 | NAME: cilium
24 | LAST DEPLOYED: Sun Sep 11 00:04:30 2022
25 | NAMESPACE: kube-system
26 | STATUS: deployed
27 | REVISION: 1
28 | TEST SUITE: None
29 | NOTES:
30 | You have successfully installed Cilium with Hubble.
31 |
32 | Your release version is 1.12.1.
33 |
34 | For any further help, visit https://docs.cilium.io/en/v1.12/gettinghelp
35 | [root@k8s-master01 ~]#
36 | ```
37 |
38 | ### 3 查看
39 |
40 | ```shell
41 | [root@k8s-master01 ~]# kubectl get pod -A | grep cil
42 | kube-system cilium-gmr6c 1/1 Running 0 5m3s
43 | kube-system cilium-kzgdj 1/1 Running 0 5m3s
44 | kube-system cilium-operator-69b677f97c-6pw4k 1/1 Running 0 5m3s
45 | kube-system cilium-operator-69b677f97c-xzzdk 1/1 Running 0 5m3s
46 | kube-system cilium-q2rnr 1/1 Running 0 5m3s
47 | kube-system cilium-smx5v 1/1 Running 0 5m3s
48 | kube-system cilium-tdjq4 1/1 Running 0 5m3s
49 | [root@k8s-master01 ~]#
50 | ```
51 |
52 | ### 4 下载专属监控面板
53 |
54 | ```shell
55 | [root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml
56 | [root@k8s-master01 yaml]#
57 | [root@k8s-master01 yaml]# kubectl apply -f monitoring-example.yaml
58 | namespace/cilium-monitoring created
59 | serviceaccount/prometheus-k8s created
60 | configmap/grafana-config created
61 | configmap/grafana-cilium-dashboard created
62 | configmap/grafana-cilium-operator-dashboard created
63 | configmap/grafana-hubble-dashboard created
64 | configmap/prometheus created
65 | clusterrole.rbac.authorization.k8s.io/prometheus created
66 | clusterrolebinding.rbac.authorization.k8s.io/prometheus created
67 | service/grafana created
68 | service/prometheus created
69 | deployment.apps/grafana created
70 | deployment.apps/prometheus created
71 | [root@k8s-master01 yaml]#
72 | ```
73 |
74 | ### 5 下载部署测试用例
75 |
76 | ```shell
77 | [root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/master/examples/kubernetes/connectivity-check/connectivity-check.yaml
78 |
79 | [root@k8s-master01 yaml]# sed -i "s#google.com#baidu.cn#g" connectivity-check.yaml
80 |
81 | [root@k8s-master01 yaml]# kubectl apply -f connectivity-check.yaml
82 | deployment.apps/echo-a created
83 | deployment.apps/echo-b created
84 | deployment.apps/echo-b-host created
85 | deployment.apps/pod-to-a created
86 | deployment.apps/pod-to-external-1111 created
87 | deployment.apps/pod-to-a-denied-cnp created
88 | deployment.apps/pod-to-a-allowed-cnp created
89 | deployment.apps/pod-to-external-fqdn-allow-google-cnp created
90 | deployment.apps/pod-to-b-multi-node-clusterip created
91 | deployment.apps/pod-to-b-multi-node-headless created
92 | deployment.apps/host-to-b-multi-node-clusterip created
93 | deployment.apps/host-to-b-multi-node-headless created
94 | deployment.apps/pod-to-b-multi-node-nodeport created
95 | deployment.apps/pod-to-b-intra-node-nodeport created
96 | service/echo-a created
97 | service/echo-b created
98 | service/echo-b-headless created
99 | service/echo-b-host-headless created
100 | ciliumnetworkpolicy.cilium.io/pod-to-a-denied-cnp created
101 | ciliumnetworkpolicy.cilium.io/pod-to-a-allowed-cnp created
102 | ciliumnetworkpolicy.cilium.io/pod-to-external-fqdn-allow-google-cnp created
103 | [root@k8s-master01 yaml]#
104 | ```
105 |
106 | ### 6 查看pod
107 |
108 | ```shell
109 | [root@k8s-master01 yaml]# kubectl get pod -A
110 | NAMESPACE NAME READY STATUS RESTARTS AGE
111 | cilium-monitoring grafana-59957b9549-6zzqh 1/1 Running 0 10m
112 | cilium-monitoring prometheus-7c8c9684bb-4v9cl 1/1 Running 0 10m
113 | default chenby-75b5d7fbfb-7zjsr 1/1 Running 0 27h
114 | default chenby-75b5d7fbfb-hbvr8 1/1 Running 0 27h
115 | default chenby-75b5d7fbfb-ppbzg 1/1 Running 0 27h
116 | default echo-a-6799dff547-pnx6w 1/1 Running 0 10m
117 | default echo-b-fc47b659c-4bdg9 1/1 Running 0 10m
118 | default echo-b-host-67fcfd59b7-28r9s 1/1 Running 0 10m
119 | default host-to-b-multi-node-clusterip-69c57975d6-z4j2z 1/1 Running 0 10m
120 | default host-to-b-multi-node-headless-865899f7bb-frrmc 1/1 Running 0 10m
121 | default pod-to-a-allowed-cnp-5f9d7d4b9d-hcd8x 1/1 Running 0 10m
122 | default pod-to-a-denied-cnp-65cc5ff97b-2rzb8 1/1 Running 0 10m
123 | default pod-to-a-dfc64f564-p7xcn 1/1 Running 0 10m
124 | default pod-to-b-intra-node-nodeport-677868746b-trk2l 1/1 Running 0 10m
125 | default pod-to-b-multi-node-clusterip-76bbbc677b-knfq2 1/1 Running 0 10m
126 | default pod-to-b-multi-node-headless-698c6579fd-mmvd7 1/1 Running 0 10m
127 | default pod-to-b-multi-node-nodeport-5dc4b8cfd6-8dxmz 1/1 Running 0 10m
128 | default pod-to-external-1111-8459965778-pjt9b 1/1 Running 0 10m
129 | default pod-to-external-fqdn-allow-google-cnp-64df9fb89b-l9l4q 1/1 Running 0 10m
130 | kube-system cilium-7rfj6 1/1 Running 0 56s
131 | kube-system cilium-d4cch 1/1 Running 0 56s
132 | kube-system cilium-h5x8r 1/1 Running 0 56s
133 | kube-system cilium-operator-5dbddb6dbf-flpl5 1/1 Running 0 56s
134 | kube-system cilium-operator-5dbddb6dbf-gcznc 1/1 Running 0 56s
135 | kube-system cilium-t2xlz 1/1 Running 0 56s
136 | kube-system cilium-z65z7 1/1 Running 0 56s
137 | kube-system coredns-665475b9f8-jkqn8 1/1 Running 1 (36h ago) 36h
138 | kube-system hubble-relay-59d8575-9pl9z 1/1 Running 0 56s
139 | kube-system hubble-ui-64d4995d57-nsv9j 2/2 Running 0 56s
140 | kube-system metrics-server-776f58c94b-c6zgs 1/1 Running 1 (36h ago) 37h
141 | [root@k8s-master01 yaml]#
142 | ```
143 |
144 | ### 7 修改为NodePort
145 |
146 | ```shell
147 | [root@k8s-master01 yaml]# kubectl edit svc -n kube-system hubble-ui
148 | service/hubble-ui edited
149 | [root@k8s-master01 yaml]#
150 | [root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring grafana
151 | service/grafana edited
152 | [root@k8s-master01 yaml]#
153 | [root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring prometheus
154 | service/prometheus edited
155 | [root@k8s-master01 yaml]#
156 |
157 | type: NodePort
158 | ```
159 |
160 | ### 8 查看端口
161 |
162 | ```shell
163 | [root@k8s-master01 yaml]# kubectl get svc -A | grep monit
164 | cilium-monitoring grafana NodePort 10.100.250.17 3000:30707/TCP 15m
165 | cilium-monitoring prometheus NodePort 10.100.131.243 9090:31155/TCP 15m
166 | [root@k8s-master01 yaml]#
167 | [root@k8s-master01 yaml]# kubectl get svc -A | grep hubble
168 | kube-system hubble-metrics ClusterIP None 9965/TCP 5m12s
169 | kube-system hubble-peer ClusterIP 10.100.150.29 443/TCP 5m12s
170 | kube-system hubble-relay ClusterIP 10.109.251.34 80/TCP 5m12s
171 | kube-system hubble-ui NodePort 10.102.253.59 80:31219/TCP 5m12s
172 | [root@k8s-master01 yaml]#
173 | ```
174 |
175 | ### 9 访问
176 |
177 | ```shell
178 | http://192.168.1.61:30707
179 | http://192.168.1.61:31155
180 | http://192.168.1.61:31219
181 | ```
182 |
183 | > **关于**
184 | >
185 | > https://www.oiox.cn/
186 | >
187 | > https://www.oiox.cn/index.php/start-page.html
188 | >
189 | > **CSDN、GitHub、知乎、开源中国、思否、掘金、简书、华为云、阿里云、腾讯云、哔哩哔哩、今日头条、新浪微博、个人博客**
190 | >
191 | > **全网可搜《小陈运维》**
192 | >
193 | > **文章主要发布于微信公众号**
--------------------------------------------------------------------------------
/images/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cby-chen/Kubernetes/600e9c2304bf9d067ed446f6ca4fa9a353aee6a9/images/1.jpg
--------------------------------------------------------------------------------
/images/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cby-chen/Kubernetes/600e9c2304bf9d067ed446f6ca4fa9a353aee6a9/images/2.jpg
--------------------------------------------------------------------------------
/images/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cby-chen/Kubernetes/600e9c2304bf9d067ed446f6ca4fa9a353aee6a9/images/3.jpg
--------------------------------------------------------------------------------
/ingress-yaml/backend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: default-http-backend
5 | labels:
6 | app.kubernetes.io/name: default-http-backend
7 | namespace: kube-system
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app.kubernetes.io/name: default-http-backend
13 | template:
14 | metadata:
15 | labels:
16 | app.kubernetes.io/name: default-http-backend
17 | spec:
18 | terminationGracePeriodSeconds: 60
19 | containers:
20 | - name: default-http-backend
21 | image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5
22 | livenessProbe:
23 | httpGet:
24 | path: /healthz
25 | port: 8080
26 | scheme: HTTP
27 | initialDelaySeconds: 30
28 | timeoutSeconds: 5
29 | ports:
30 | - containerPort: 8080
31 | resources:
32 | limits:
33 | cpu: 10m
34 | memory: 20Mi
35 | requests:
36 | cpu: 10m
37 | memory: 20Mi
38 | ---
39 | apiVersion: v1
40 | kind: Service
41 | metadata:
42 | name: default-http-backend
43 | namespace: kube-system
44 | labels:
45 | app.kubernetes.io/name: default-http-backend
46 | spec:
47 | ports:
48 | - port: 80
49 | targetPort: 8080
50 | selector:
51 | app.kubernetes.io/name: default-http-backend
52 |
--------------------------------------------------------------------------------
/ingress-yaml/deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | app.kubernetes.io/instance: ingress-nginx
6 | app.kubernetes.io/name: ingress-nginx
7 | name: ingress-nginx
8 | ---
9 | apiVersion: v1
10 | automountServiceAccountToken: true
11 | kind: ServiceAccount
12 | metadata:
13 | labels:
14 | app.kubernetes.io/component: controller
15 | app.kubernetes.io/instance: ingress-nginx
16 | app.kubernetes.io/name: ingress-nginx
17 | app.kubernetes.io/part-of: ingress-nginx
18 | app.kubernetes.io/version: 1.8.0
19 | name: ingress-nginx
20 | namespace: ingress-nginx
21 | ---
22 | apiVersion: v1
23 | kind: ServiceAccount
24 | metadata:
25 | labels:
26 | app.kubernetes.io/component: admission-webhook
27 | app.kubernetes.io/instance: ingress-nginx
28 | app.kubernetes.io/name: ingress-nginx
29 | app.kubernetes.io/part-of: ingress-nginx
30 | app.kubernetes.io/version: 1.8.0
31 | name: ingress-nginx-admission
32 | namespace: ingress-nginx
33 | ---
34 | apiVersion: rbac.authorization.k8s.io/v1
35 | kind: Role
36 | metadata:
37 | labels:
38 | app.kubernetes.io/component: controller
39 | app.kubernetes.io/instance: ingress-nginx
40 | app.kubernetes.io/name: ingress-nginx
41 | app.kubernetes.io/part-of: ingress-nginx
42 | app.kubernetes.io/version: 1.8.0
43 | name: ingress-nginx
44 | namespace: ingress-nginx
45 | rules:
46 | - apiGroups:
47 | - ""
48 | resources:
49 | - namespaces
50 | verbs:
51 | - get
52 | - apiGroups:
53 | - ""
54 | resources:
55 | - configmaps
56 | - pods
57 | - secrets
58 | - endpoints
59 | verbs:
60 | - get
61 | - list
62 | - watch
63 | - apiGroups:
64 | - ""
65 | resources:
66 | - services
67 | verbs:
68 | - get
69 | - list
70 | - watch
71 | - apiGroups:
72 | - networking.k8s.io
73 | resources:
74 | - ingresses
75 | verbs:
76 | - get
77 | - list
78 | - watch
79 | - apiGroups:
80 | - networking.k8s.io
81 | resources:
82 | - ingresses/status
83 | verbs:
84 | - update
85 | - apiGroups:
86 | - networking.k8s.io
87 | resources:
88 | - ingressclasses
89 | verbs:
90 | - get
91 | - list
92 | - watch
93 | - apiGroups:
94 | - coordination.k8s.io
95 | resourceNames:
96 | - ingress-nginx-leader
97 | resources:
98 | - leases
99 | verbs:
100 | - get
101 | - update
102 | - apiGroups:
103 | - coordination.k8s.io
104 | resources:
105 | - leases
106 | verbs:
107 | - create
108 | - apiGroups:
109 | - ""
110 | resources:
111 | - events
112 | verbs:
113 | - create
114 | - patch
115 | - apiGroups:
116 | - discovery.k8s.io
117 | resources:
118 | - endpointslices
119 | verbs:
120 | - list
121 | - watch
122 | - get
123 | ---
124 | apiVersion: rbac.authorization.k8s.io/v1
125 | kind: Role
126 | metadata:
127 | labels:
128 | app.kubernetes.io/component: admission-webhook
129 | app.kubernetes.io/instance: ingress-nginx
130 | app.kubernetes.io/name: ingress-nginx
131 | app.kubernetes.io/part-of: ingress-nginx
132 | app.kubernetes.io/version: 1.8.0
133 | name: ingress-nginx-admission
134 | namespace: ingress-nginx
135 | rules:
136 | - apiGroups:
137 | - ""
138 | resources:
139 | - secrets
140 | verbs:
141 | - get
142 | - create
143 | ---
144 | apiVersion: rbac.authorization.k8s.io/v1
145 | kind: ClusterRole
146 | metadata:
147 | labels:
148 | app.kubernetes.io/instance: ingress-nginx
149 | app.kubernetes.io/name: ingress-nginx
150 | app.kubernetes.io/part-of: ingress-nginx
151 | app.kubernetes.io/version: 1.8.0
152 | name: ingress-nginx
153 | rules:
154 | - apiGroups:
155 | - ""
156 | resources:
157 | - configmaps
158 | - endpoints
159 | - nodes
160 | - pods
161 | - secrets
162 | - namespaces
163 | verbs:
164 | - list
165 | - watch
166 | - apiGroups:
167 | - coordination.k8s.io
168 | resources:
169 | - leases
170 | verbs:
171 | - list
172 | - watch
173 | - apiGroups:
174 | - ""
175 | resources:
176 | - nodes
177 | verbs:
178 | - get
179 | - apiGroups:
180 | - ""
181 | resources:
182 | - services
183 | verbs:
184 | - get
185 | - list
186 | - watch
187 | - apiGroups:
188 | - networking.k8s.io
189 | resources:
190 | - ingresses
191 | verbs:
192 | - get
193 | - list
194 | - watch
195 | - apiGroups:
196 | - ""
197 | resources:
198 | - events
199 | verbs:
200 | - create
201 | - patch
202 | - apiGroups:
203 | - networking.k8s.io
204 | resources:
205 | - ingresses/status
206 | verbs:
207 | - update
208 | - apiGroups:
209 | - networking.k8s.io
210 | resources:
211 | - ingressclasses
212 | verbs:
213 | - get
214 | - list
215 | - watch
216 | - apiGroups:
217 | - discovery.k8s.io
218 | resources:
219 | - endpointslices
220 | verbs:
221 | - list
222 | - watch
223 | - get
224 | ---
225 | apiVersion: rbac.authorization.k8s.io/v1
226 | kind: ClusterRole
227 | metadata:
228 | labels:
229 | app.kubernetes.io/component: admission-webhook
230 | app.kubernetes.io/instance: ingress-nginx
231 | app.kubernetes.io/name: ingress-nginx
232 | app.kubernetes.io/part-of: ingress-nginx
233 | app.kubernetes.io/version: 1.8.0
234 | name: ingress-nginx-admission
235 | rules:
236 | - apiGroups:
237 | - admissionregistration.k8s.io
238 | resources:
239 | - validatingwebhookconfigurations
240 | verbs:
241 | - get
242 | - update
243 | ---
244 | apiVersion: rbac.authorization.k8s.io/v1
245 | kind: RoleBinding
246 | metadata:
247 | labels:
248 | app.kubernetes.io/component: controller
249 | app.kubernetes.io/instance: ingress-nginx
250 | app.kubernetes.io/name: ingress-nginx
251 | app.kubernetes.io/part-of: ingress-nginx
252 | app.kubernetes.io/version: 1.8.0
253 | name: ingress-nginx
254 | namespace: ingress-nginx
255 | roleRef:
256 | apiGroup: rbac.authorization.k8s.io
257 | kind: Role
258 | name: ingress-nginx
259 | subjects:
260 | - kind: ServiceAccount
261 | name: ingress-nginx
262 | namespace: ingress-nginx
263 | ---
264 | apiVersion: rbac.authorization.k8s.io/v1
265 | kind: RoleBinding
266 | metadata:
267 | labels:
268 | app.kubernetes.io/component: admission-webhook
269 | app.kubernetes.io/instance: ingress-nginx
270 | app.kubernetes.io/name: ingress-nginx
271 | app.kubernetes.io/part-of: ingress-nginx
272 | app.kubernetes.io/version: 1.8.0
273 | name: ingress-nginx-admission
274 | namespace: ingress-nginx
275 | roleRef:
276 | apiGroup: rbac.authorization.k8s.io
277 | kind: Role
278 | name: ingress-nginx-admission
279 | subjects:
280 | - kind: ServiceAccount
281 | name: ingress-nginx-admission
282 | namespace: ingress-nginx
283 | ---
284 | apiVersion: rbac.authorization.k8s.io/v1
285 | kind: ClusterRoleBinding
286 | metadata:
287 | labels:
288 | app.kubernetes.io/instance: ingress-nginx
289 | app.kubernetes.io/name: ingress-nginx
290 | app.kubernetes.io/part-of: ingress-nginx
291 | app.kubernetes.io/version: 1.8.0
292 | name: ingress-nginx
293 | roleRef:
294 | apiGroup: rbac.authorization.k8s.io
295 | kind: ClusterRole
296 | name: ingress-nginx
297 | subjects:
298 | - kind: ServiceAccount
299 | name: ingress-nginx
300 | namespace: ingress-nginx
301 | ---
302 | apiVersion: rbac.authorization.k8s.io/v1
303 | kind: ClusterRoleBinding
304 | metadata:
305 | labels:
306 | app.kubernetes.io/component: admission-webhook
307 | app.kubernetes.io/instance: ingress-nginx
308 | app.kubernetes.io/name: ingress-nginx
309 | app.kubernetes.io/part-of: ingress-nginx
310 | app.kubernetes.io/version: 1.8.0
311 | name: ingress-nginx-admission
312 | roleRef:
313 | apiGroup: rbac.authorization.k8s.io
314 | kind: ClusterRole
315 | name: ingress-nginx-admission
316 | subjects:
317 | - kind: ServiceAccount
318 | name: ingress-nginx-admission
319 | namespace: ingress-nginx
320 | ---
321 | apiVersion: v1
322 | data:
323 |   allow-snippet-annotations: "true"  # NOTE(review): lets Ingress authors inject raw nginx config (CVE-2021-25742); upstream default is now "false" — confirm snippets are actually needed
324 | kind: ConfigMap
325 | metadata:
326 |   labels:
327 |     app.kubernetes.io/component: controller
328 |     app.kubernetes.io/instance: ingress-nginx
329 |     app.kubernetes.io/name: ingress-nginx
330 |     app.kubernetes.io/part-of: ingress-nginx
331 |     app.kubernetes.io/version: 1.8.0
332 |   name: ingress-nginx-controller
333 |   namespace: ingress-nginx
334 | ---
335 | apiVersion: v1
336 | kind: Service
337 | metadata:
338 | labels:
339 | app.kubernetes.io/component: controller
340 | app.kubernetes.io/instance: ingress-nginx
341 | app.kubernetes.io/name: ingress-nginx
342 | app.kubernetes.io/part-of: ingress-nginx
343 | app.kubernetes.io/version: 1.8.0
344 | name: ingress-nginx-controller
345 | namespace: ingress-nginx
346 | spec:
347 | externalTrafficPolicy: Local
348 | ipFamilies:
349 | - IPv4
350 | ipFamilyPolicy: SingleStack
351 | ports:
352 | - appProtocol: http
353 | name: http
354 | port: 80
355 | protocol: TCP
356 | targetPort: http
357 | - appProtocol: https
358 | name: https
359 | port: 443
360 | protocol: TCP
361 | targetPort: https
362 | selector:
363 | app.kubernetes.io/component: controller
364 | app.kubernetes.io/instance: ingress-nginx
365 | app.kubernetes.io/name: ingress-nginx
366 | type: LoadBalancer
367 | ---
368 | apiVersion: v1
369 | kind: Service
370 | metadata:
371 | labels:
372 | app.kubernetes.io/component: controller
373 | app.kubernetes.io/instance: ingress-nginx
374 | app.kubernetes.io/name: ingress-nginx
375 | app.kubernetes.io/part-of: ingress-nginx
376 | app.kubernetes.io/version: 1.8.0
377 | name: ingress-nginx-controller-admission
378 | namespace: ingress-nginx
379 | spec:
380 | ports:
381 | - appProtocol: https
382 | name: https-webhook
383 | port: 443
384 | targetPort: webhook
385 | selector:
386 | app.kubernetes.io/component: controller
387 | app.kubernetes.io/instance: ingress-nginx
388 | app.kubernetes.io/name: ingress-nginx
389 | type: ClusterIP
390 | ---
391 | apiVersion: apps/v1
392 | kind: Deployment
393 | metadata:
394 | labels:
395 | app.kubernetes.io/component: controller
396 | app.kubernetes.io/instance: ingress-nginx
397 | app.kubernetes.io/name: ingress-nginx
398 | app.kubernetes.io/part-of: ingress-nginx
399 | app.kubernetes.io/version: 1.8.0
400 | name: ingress-nginx-controller
401 | namespace: ingress-nginx
402 | spec:
403 | minReadySeconds: 0
404 | revisionHistoryLimit: 10
405 | selector:
406 | matchLabels:
407 | app.kubernetes.io/component: controller
408 | app.kubernetes.io/instance: ingress-nginx
409 | app.kubernetes.io/name: ingress-nginx
410 | template:
411 | metadata:
412 | labels:
413 | app.kubernetes.io/component: controller
414 | app.kubernetes.io/instance: ingress-nginx
415 | app.kubernetes.io/name: ingress-nginx
416 | app.kubernetes.io/part-of: ingress-nginx
417 | app.kubernetes.io/version: 1.8.0
418 | spec:
419 | containers:
420 | - args:
421 | - /nginx-ingress-controller
422 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
423 | - --election-id=ingress-nginx-leader
424 | - --controller-class=k8s.io/ingress-nginx
425 | - --ingress-class=nginx
426 | - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
427 | - --validating-webhook=:8443
428 | - --validating-webhook-certificate=/usr/local/certificates/cert
429 | - --validating-webhook-key=/usr/local/certificates/key
430 | env:
431 | - name: POD_NAME
432 | valueFrom:
433 | fieldRef:
434 | fieldPath: metadata.name
435 | - name: POD_NAMESPACE
436 | valueFrom:
437 | fieldRef:
438 | fieldPath: metadata.namespace
439 | - name: LD_PRELOAD
440 | value: /usr/local/lib/libmimalloc.so
441 | image: m.daocloud.io/registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3
442 | imagePullPolicy: IfNotPresent
443 | lifecycle:
444 | preStop:
445 | exec:
446 | command:
447 | - /wait-shutdown
448 | livenessProbe:
449 | failureThreshold: 5
450 | httpGet:
451 | path: /healthz
452 | port: 10254
453 | scheme: HTTP
454 | initialDelaySeconds: 10
455 | periodSeconds: 10
456 | successThreshold: 1
457 | timeoutSeconds: 1
458 | name: controller
459 | ports:
460 | - containerPort: 80
461 | name: http
462 | protocol: TCP
463 | - containerPort: 443
464 | name: https
465 | protocol: TCP
466 | - containerPort: 8443
467 | name: webhook
468 | protocol: TCP
469 | readinessProbe:
470 | failureThreshold: 3
471 | httpGet:
472 | path: /healthz
473 | port: 10254
474 | scheme: HTTP
475 | initialDelaySeconds: 10
476 | periodSeconds: 10
477 | successThreshold: 1
478 | timeoutSeconds: 1
479 | resources:
480 | requests:
481 | cpu: 100m
482 | memory: 90Mi
483 | securityContext:
484 | allowPrivilegeEscalation: true
485 | capabilities:
486 | add:
487 | - NET_BIND_SERVICE
488 | drop:
489 | - ALL
490 | runAsUser: 101
491 | volumeMounts:
492 | - mountPath: /usr/local/certificates/
493 | name: webhook-cert
494 | readOnly: true
495 | dnsPolicy: ClusterFirst
496 | nodeSelector:
497 | kubernetes.io/os: linux
498 | serviceAccountName: ingress-nginx
499 | terminationGracePeriodSeconds: 300
500 | volumes:
501 | - name: webhook-cert
502 | secret:
503 | secretName: ingress-nginx-admission
504 | ---
505 | apiVersion: batch/v1
506 | kind: Job
507 | metadata:
508 | labels:
509 | app.kubernetes.io/component: admission-webhook
510 | app.kubernetes.io/instance: ingress-nginx
511 | app.kubernetes.io/name: ingress-nginx
512 | app.kubernetes.io/part-of: ingress-nginx
513 | app.kubernetes.io/version: 1.8.0
514 | name: ingress-nginx-admission-create
515 | namespace: ingress-nginx
516 | spec:
517 | template:
518 | metadata:
519 | labels:
520 | app.kubernetes.io/component: admission-webhook
521 | app.kubernetes.io/instance: ingress-nginx
522 | app.kubernetes.io/name: ingress-nginx
523 | app.kubernetes.io/part-of: ingress-nginx
524 | app.kubernetes.io/version: 1.8.0
525 | name: ingress-nginx-admission-create
526 | spec:
527 | containers:
528 | - args:
529 | - create
530 | - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
531 | - --namespace=$(POD_NAMESPACE)
532 | - --secret-name=ingress-nginx-admission
533 | env:
534 | - name: POD_NAMESPACE
535 | valueFrom:
536 | fieldRef:
537 | fieldPath: metadata.namespace
538 | image: m.daocloud.io/registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
539 | imagePullPolicy: IfNotPresent
540 | name: create
541 | securityContext:
542 | allowPrivilegeEscalation: false
543 | nodeSelector:
544 | kubernetes.io/os: linux
545 | restartPolicy: OnFailure
546 | securityContext:
547 | fsGroup: 2000
548 | runAsNonRoot: true
549 | runAsUser: 2000
550 | serviceAccountName: ingress-nginx-admission
551 | ---
552 | apiVersion: batch/v1
553 | kind: Job
554 | metadata:
555 | labels:
556 | app.kubernetes.io/component: admission-webhook
557 | app.kubernetes.io/instance: ingress-nginx
558 | app.kubernetes.io/name: ingress-nginx
559 | app.kubernetes.io/part-of: ingress-nginx
560 | app.kubernetes.io/version: 1.8.0
561 | name: ingress-nginx-admission-patch
562 | namespace: ingress-nginx
563 | spec:
564 | template:
565 | metadata:
566 | labels:
567 | app.kubernetes.io/component: admission-webhook
568 | app.kubernetes.io/instance: ingress-nginx
569 | app.kubernetes.io/name: ingress-nginx
570 | app.kubernetes.io/part-of: ingress-nginx
571 | app.kubernetes.io/version: 1.8.0
572 | name: ingress-nginx-admission-patch
573 | spec:
574 | containers:
575 | - args:
576 | - patch
577 | - --webhook-name=ingress-nginx-admission
578 | - --namespace=$(POD_NAMESPACE)
579 | - --patch-mutating=false
580 | - --secret-name=ingress-nginx-admission
581 | - --patch-failure-policy=Fail
582 | env:
583 | - name: POD_NAMESPACE
584 | valueFrom:
585 | fieldRef:
586 | fieldPath: metadata.namespace
587 | image: m.daocloud.io/registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
588 | imagePullPolicy: IfNotPresent
589 | name: patch
590 | securityContext:
591 | allowPrivilegeEscalation: false
592 | nodeSelector:
593 | kubernetes.io/os: linux
594 | restartPolicy: OnFailure
595 | securityContext:
596 | fsGroup: 2000
597 | runAsNonRoot: true
598 | runAsUser: 2000
599 | serviceAccountName: ingress-nginx-admission
600 | ---
601 | apiVersion: networking.k8s.io/v1
602 | kind: IngressClass
603 | metadata:
604 | labels:
605 | app.kubernetes.io/component: controller
606 | app.kubernetes.io/instance: ingress-nginx
607 | app.kubernetes.io/name: ingress-nginx
608 | app.kubernetes.io/part-of: ingress-nginx
609 | app.kubernetes.io/version: 1.8.0
610 | name: nginx
611 | spec:
612 | controller: k8s.io/ingress-nginx
613 | ---
614 | apiVersion: admissionregistration.k8s.io/v1
615 | kind: ValidatingWebhookConfiguration
616 | metadata:
617 | labels:
618 | app.kubernetes.io/component: admission-webhook
619 | app.kubernetes.io/instance: ingress-nginx
620 | app.kubernetes.io/name: ingress-nginx
621 | app.kubernetes.io/part-of: ingress-nginx
622 | app.kubernetes.io/version: 1.8.0
623 | name: ingress-nginx-admission
624 | webhooks:
625 | - admissionReviewVersions:
626 | - v1
627 | clientConfig:
628 | service:
629 | name: ingress-nginx-controller-admission
630 | namespace: ingress-nginx
631 | path: /networking/v1/ingresses
632 | failurePolicy: Fail
633 | matchPolicy: Equivalent
634 | name: validate.nginx.ingress.kubernetes.io
635 | rules:
636 | - apiGroups:
637 | - networking.k8s.io
638 | apiVersions:
639 | - v1
640 | operations:
641 | - CREATE
642 | - UPDATE
643 | resources:
644 | - ingresses
645 | sideEffects: None
646 |
--------------------------------------------------------------------------------
/ingress-yaml/ingress-demo-app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hello-server
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hello-server
10 | template:
11 | metadata:
12 | labels:
13 | app: hello-server
14 | spec:
15 | containers:
16 | - name: hello-server
17 | image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
18 | ports:
19 | - containerPort: 9000
20 | ---
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | labels:
25 | app: nginx-demo
26 | name: nginx-demo
27 | spec:
28 | replicas: 2
29 | selector:
30 | matchLabels:
31 | app: nginx-demo
32 | template:
33 | metadata:
34 | labels:
35 | app: nginx-demo
36 | spec:
37 | containers:
38 | - image: nginx
39 | name: nginx
40 | ---
41 | apiVersion: v1
42 | kind: Service
43 | metadata:
44 | labels:
45 | app: nginx-demo
46 | name: nginx-demo
47 | spec:
48 | selector:
49 | app: nginx-demo
50 | ports:
51 | - port: 8000
52 | protocol: TCP
53 | targetPort: 80
54 | ---
55 | apiVersion: v1
56 | kind: Service
57 | metadata:
58 | labels:
59 | app: hello-server
60 | name: hello-server
61 | spec:
62 | selector:
63 | app: hello-server
64 | ports:
65 | - port: 8000
66 | protocol: TCP
67 | targetPort: 9000
68 | ---
69 | apiVersion: networking.k8s.io/v1
70 | kind: Ingress
71 | metadata:
72 | name: ingress-host-bar
73 | spec:
74 | ingressClassName: nginx
75 | rules:
76 | - host: "hello.chenby.cn"
77 | http:
78 | paths:
79 | - pathType: Prefix
80 | path: "/"
81 | backend:
82 | service:
83 | name: hello-server
84 | port:
85 | number: 8000
86 | - host: "demo.chenby.cn"
87 | http:
88 | paths:
89 | - pathType: Prefix
90 | path: "/nginx"
91 | backend:
92 | service:
93 | name: nginx-demo
94 | port:
95 | number: 8000
96 |
--------------------------------------------------------------------------------
/metrics-server/high-availability.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | k8s-app: metrics-server
6 | name: metrics-server
7 | namespace: kube-system
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRole
11 | metadata:
12 | labels:
13 | k8s-app: metrics-server
14 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
15 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
16 | rbac.authorization.k8s.io/aggregate-to-view: "true"
17 | name: system:aggregated-metrics-reader
18 | rules:
19 | - apiGroups:
20 | - metrics.k8s.io
21 | resources:
22 | - pods
23 | - nodes
24 | verbs:
25 | - get
26 | - list
27 | - watch
28 | ---
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | kind: ClusterRole
31 | metadata:
32 | labels:
33 | k8s-app: metrics-server
34 | name: system:metrics-server
35 | rules:
36 | - apiGroups:
37 | - ""
38 | resources:
39 | - nodes/metrics
40 | verbs:
41 | - get
42 | - apiGroups:
43 | - ""
44 | resources:
45 | - pods
46 | - nodes
47 | verbs:
48 | - get
49 | - list
50 | - watch
51 | ---
52 | apiVersion: rbac.authorization.k8s.io/v1
53 | kind: RoleBinding
54 | metadata:
55 | labels:
56 | k8s-app: metrics-server
57 | name: metrics-server-auth-reader
58 | namespace: kube-system
59 | roleRef:
60 | apiGroup: rbac.authorization.k8s.io
61 | kind: Role
62 | name: extension-apiserver-authentication-reader
63 | subjects:
64 | - kind: ServiceAccount
65 | name: metrics-server
66 | namespace: kube-system
67 | ---
68 | apiVersion: rbac.authorization.k8s.io/v1
69 | kind: ClusterRoleBinding
70 | metadata:
71 | labels:
72 | k8s-app: metrics-server
73 | name: metrics-server:system:auth-delegator
74 | roleRef:
75 | apiGroup: rbac.authorization.k8s.io
76 | kind: ClusterRole
77 | name: system:auth-delegator
78 | subjects:
79 | - kind: ServiceAccount
80 | name: metrics-server
81 | namespace: kube-system
82 | ---
83 | apiVersion: rbac.authorization.k8s.io/v1
84 | kind: ClusterRoleBinding
85 | metadata:
86 | labels:
87 | k8s-app: metrics-server
88 | name: system:metrics-server
89 | roleRef:
90 | apiGroup: rbac.authorization.k8s.io
91 | kind: ClusterRole
92 | name: system:metrics-server
93 | subjects:
94 | - kind: ServiceAccount
95 | name: metrics-server
96 | namespace: kube-system
97 | ---
98 | apiVersion: v1
99 | kind: Service
100 | metadata:
101 | labels:
102 | k8s-app: metrics-server
103 | name: metrics-server
104 | namespace: kube-system
105 | spec:
106 | ports:
107 | - name: https
108 | port: 443
109 | protocol: TCP
110 | targetPort: https
111 | selector:
112 | k8s-app: metrics-server
113 | ---
114 | apiVersion: apps/v1
115 | kind: Deployment
116 | metadata:
117 |   labels:
118 |     k8s-app: metrics-server
119 |   name: metrics-server
120 |   namespace: kube-system
121 | spec:
122 |   replicas: 2
123 |   selector:
124 |     matchLabels:
125 |       k8s-app: metrics-server
126 |   strategy:
127 |     rollingUpdate:
128 |       maxUnavailable: 1
129 |   template:
130 |     metadata:
131 |       labels:
132 |         k8s-app: metrics-server
133 |     spec:
134 |       affinity:
135 |         podAntiAffinity:
136 |           requiredDuringSchedulingIgnoredDuringExecution:
137 |           - labelSelector:
138 |               matchLabels:
139 |                 k8s-app: metrics-server
140 |             namespaces:
141 |             - kube-system
142 |             topologyKey: kubernetes.io/hostname  # spread the 2 replicas across nodes for HA
143 |       containers:
144 |       - args:
145 |         - --cert-dir=/tmp
146 |         - --secure-port=4443
147 |         - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
148 |         - --kubelet-use-node-status-port
149 |         - --metric-resolution=15s
150 |         - --kubelet-insecure-tls  # NOTE(review): skips kubelet serving-cert verification; common for self-signed kubelet certs — remove if kubelet certs are CA-signed
151 |         - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
152 |         - --requestheader-username-headers=X-Remote-User
153 |         - --requestheader-group-headers=X-Remote-Group
154 |         - --requestheader-extra-headers-prefix=X-Remote-Extra-
155 |         image: m.daocloud.io/registry.k8s.io/metrics-server/metrics-server:v0.6.3
156 |         imagePullPolicy: IfNotPresent
157 |         livenessProbe:
158 |           failureThreshold: 3
159 |           httpGet:
160 |             path: /livez
161 |             port: https
162 |             scheme: HTTPS
163 |           periodSeconds: 10
164 |         name: metrics-server
165 |         ports:
166 |         - containerPort: 4443
167 |           name: https
168 |           protocol: TCP
169 |         readinessProbe:
170 |           failureThreshold: 3
171 |           httpGet:
172 |             path: /readyz
173 |             port: https
174 |             scheme: HTTPS
175 |           initialDelaySeconds: 20
176 |           periodSeconds: 10
177 |         resources:
178 |           requests:
179 |             cpu: 100m
180 |             memory: 200Mi
181 |         securityContext:
182 |           allowPrivilegeEscalation: false
183 |           readOnlyRootFilesystem: true
184 |           runAsNonRoot: true
185 |           runAsUser: 1000
186 |         volumeMounts:
187 |         - mountPath: /tmp
188 |           name: tmp-dir
189 |         - name: ca-ssl
190 |           mountPath: /etc/kubernetes/pki  # NOTE(review): mounts host PKI dir read-write into the pod; presumably only front-proxy-ca.pem is needed — consider readOnly: true
191 |       nodeSelector:
192 |         kubernetes.io/os: linux
193 |       priorityClassName: system-cluster-critical
194 |       serviceAccountName: metrics-server
195 |       volumes:
196 |       - emptyDir: {}
197 |         name: tmp-dir
198 |       - name: ca-ssl
199 |         hostPath:
200 |           path: /etc/kubernetes/pki
201 | ---
202 | apiVersion: policy/v1  # fixed: policy/v1beta1 was removed in Kubernetes 1.25; policy/v1 (GA since 1.21) matches the 1.27+ clusters this repo targets
203 | kind: PodDisruptionBudget
204 | metadata:
205 |   name: metrics-server
206 |   namespace: kube-system
207 | spec:
208 |   minAvailable: 1
209 |   selector:
210 |     matchLabels:
211 |       k8s-app: metrics-server
212 | ---
213 | apiVersion: apiregistration.k8s.io/v1
214 | kind: APIService
215 | metadata:
216 |   labels:
217 |     k8s-app: metrics-server
218 |   name: v1beta1.metrics.k8s.io
219 | spec:
220 |   group: metrics.k8s.io
221 |   groupPriorityMinimum: 100
222 |   insecureSkipTLSVerify: true  # NOTE(review): apiserver will not verify metrics-server's serving cert; matches the self-signed setup implied by --kubelet-insecure-tls — set a caBundle instead if certs are CA-signed
223 |   service:
224 |     name: metrics-server
225 |     namespace: kube-system
226 |   version: v1beta1
227 |   versionPriority: 100
228 |
--------------------------------------------------------------------------------
/metrics-server/metrics-server-components.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | k8s-app: metrics-server
6 | name: metrics-server
7 | namespace: kube-system
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRole
11 | metadata:
12 | labels:
13 | k8s-app: metrics-server
14 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
15 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
16 | rbac.authorization.k8s.io/aggregate-to-view: "true"
17 | name: system:aggregated-metrics-reader
18 | rules:
19 | - apiGroups:
20 | - metrics.k8s.io
21 | resources:
22 | - pods
23 | - nodes
24 | verbs:
25 | - get
26 | - list
27 | - watch
28 | ---
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | kind: ClusterRole
31 | metadata:
32 | labels:
33 | k8s-app: metrics-server
34 | name: system:metrics-server
35 | rules:
36 | - apiGroups:
37 | - ""
38 | resources:
39 | - nodes/metrics
40 | verbs:
41 | - get
42 | - apiGroups:
43 | - ""
44 | resources:
45 | - pods
46 | - nodes
47 | verbs:
48 | - get
49 | - list
50 | - watch
51 | ---
52 | apiVersion: rbac.authorization.k8s.io/v1
53 | kind: RoleBinding
54 | metadata:
55 | labels:
56 | k8s-app: metrics-server
57 | name: metrics-server-auth-reader
58 | namespace: kube-system
59 | roleRef:
60 | apiGroup: rbac.authorization.k8s.io
61 | kind: Role
62 | name: extension-apiserver-authentication-reader
63 | subjects:
64 | - kind: ServiceAccount
65 | name: metrics-server
66 | namespace: kube-system
67 | ---
68 | apiVersion: rbac.authorization.k8s.io/v1
69 | kind: ClusterRoleBinding
70 | metadata:
71 | labels:
72 | k8s-app: metrics-server
73 | name: metrics-server:system:auth-delegator
74 | roleRef:
75 | apiGroup: rbac.authorization.k8s.io
76 | kind: ClusterRole
77 | name: system:auth-delegator
78 | subjects:
79 | - kind: ServiceAccount
80 | name: metrics-server
81 | namespace: kube-system
82 | ---
83 | apiVersion: rbac.authorization.k8s.io/v1
84 | kind: ClusterRoleBinding
85 | metadata:
86 | labels:
87 | k8s-app: metrics-server
88 | name: system:metrics-server
89 | roleRef:
90 | apiGroup: rbac.authorization.k8s.io
91 | kind: ClusterRole
92 | name: system:metrics-server
93 | subjects:
94 | - kind: ServiceAccount
95 | name: metrics-server
96 | namespace: kube-system
97 | ---
98 | apiVersion: v1
99 | kind: Service
100 | metadata:
101 | labels:
102 | k8s-app: metrics-server
103 | name: metrics-server
104 | namespace: kube-system
105 | spec:
106 | ports:
107 | - name: https
108 | port: 443
109 | protocol: TCP
110 | targetPort: https
111 | selector:
112 | k8s-app: metrics-server
113 | ---
114 | apiVersion: apps/v1
115 | kind: Deployment
116 | metadata:
117 | labels:
118 | k8s-app: metrics-server
119 | name: metrics-server
120 | namespace: kube-system
121 | spec:
122 | selector:
123 | matchLabels:
124 | k8s-app: metrics-server
125 | strategy:
126 | rollingUpdate:
127 | maxUnavailable: 0
128 | template:
129 | metadata:
130 | labels:
131 | k8s-app: metrics-server
132 | spec:
133 | containers:
134 | - args:
135 | - --cert-dir=/tmp
136 | - --secure-port=4443
137 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
138 | - --kubelet-use-node-status-port
139 | - --metric-resolution=15s
140 | - --kubelet-insecure-tls
141 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
142 | - --requestheader-username-headers=X-Remote-User
143 | - --requestheader-group-headers=X-Remote-Group
144 | - --requestheader-extra-headers-prefix=X-Remote-Extra-
145 | image: m.daocloud.io/registry.k8s.io/metrics-server/metrics-server:v0.6.3
146 | imagePullPolicy: IfNotPresent
147 | livenessProbe:
148 | failureThreshold: 3
149 | httpGet:
150 | path: /livez
151 | port: https
152 | scheme: HTTPS
153 | periodSeconds: 10
154 | name: metrics-server
155 | ports:
156 | - containerPort: 4443
157 | name: https
158 | protocol: TCP
159 | readinessProbe:
160 | failureThreshold: 3
161 | httpGet:
162 | path: /readyz
163 | port: https
164 | scheme: HTTPS
165 | initialDelaySeconds: 20
166 | periodSeconds: 10
167 | resources:
168 | requests:
169 | cpu: 100m
170 | memory: 200Mi
171 | securityContext:
172 | allowPrivilegeEscalation: false
173 | readOnlyRootFilesystem: true
174 | runAsNonRoot: true
175 | runAsUser: 1000
176 | volumeMounts:
177 | - mountPath: /tmp
178 | name: tmp-dir
179 | - name: ca-ssl
180 | mountPath: /etc/kubernetes/pki
181 | nodeSelector:
182 | kubernetes.io/os: linux
183 | priorityClassName: system-cluster-critical
184 | serviceAccountName: metrics-server
185 | volumes:
186 | - emptyDir: {}
187 | name: tmp-dir
188 | - name: ca-ssl
189 | hostPath:
190 | path: /etc/kubernetes/pki
191 | ---
192 | apiVersion: apiregistration.k8s.io/v1
193 | kind: APIService
194 | metadata:
195 | labels:
196 | k8s-app: metrics-server
197 | name: v1beta1.metrics.k8s.io
198 | spec:
199 | group: metrics.k8s.io
200 | groupPriorityMinimum: 100
201 | insecureSkipTLSVerify: true
202 | service:
203 | name: metrics-server
204 | namespace: kube-system
205 | version: v1beta1
206 | versionPriority: 100
207 |
--------------------------------------------------------------------------------
/pki/admin-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "admin",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "system:masters",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/pki/apiserver-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kube-apiserver",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "Kubernetes",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/pki/ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "876000h"
5 | },
6 | "profiles": {
7 | "kubernetes": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth",
12 | "client auth"
13 | ],
14 | "expiry": "876000h"
15 | }
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/pki/ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "Kubernetes",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ],
16 | "ca": {
17 | "expiry": "876000h"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/pki/etcd-ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "etcd",
13 | "OU": "Etcd Security"
14 | }
15 | ],
16 | "ca": {
17 | "expiry": "876000h"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/pki/etcd-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "etcd",
13 | "OU": "Etcd Security"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/pki/front-proxy-ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "ca": {
8 | "expiry": "876000h"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/pki/front-proxy-client-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "front-proxy-client",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/pki/kube-proxy-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-proxy",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "system:kube-proxy",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/pki/kubelet-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:node:$NODE",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "L": "Beijing",
11 | "ST": "Beijing",
12 | "O": "system:nodes",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/pki/manager-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-controller-manager",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "system:kube-controller-manager",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/pki/scheduler-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-scheduler",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Beijing",
11 | "L": "Beijing",
12 | "O": "system:kube-scheduler",
13 | "OU": "Kubernetes-manual"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/shell/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 查看版本地址:
4 | #
5 | # https://github.com/containernetworking/plugins/releases/
6 | # https://github.com/containerd/containerd/releases/
7 | # https://github.com/kubernetes-sigs/cri-tools/releases/
8 | # https://github.com/Mirantis/cri-dockerd/releases/
9 | # https://github.com/etcd-io/etcd/releases/
10 | # https://github.com/cloudflare/cfssl/releases/
11 | # https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
12 | # https://download.docker.com/linux/static/stable/x86_64/
13 | # https://github.com/opencontainers/runc/releases/
14 | # https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/
15 | # https://github.com/helm/helm/tags
16 | # http://nginx.org/download/
17 |
18 | # Version numbers
19 | cni_plugins_version='v1.3.0'
20 | cri_containerd_cni_version='1.7.3'
21 | crictl_version='v1.28.0'
22 | cri_dockerd_version='0.3.4'
23 | etcd_version='v3.5.9'
24 | cfssl_version='1.6.4'
25 | kubernetes_server_version='1.28.0'
26 | docker_version='24.0.5'
27 | runc_version='1.1.9'
28 | kernel_version='5.4.254'
29 | helm_version='3.12.3'
30 | nginx_version='1.25.2'
31 |
32 | # URLs
33 | base_url='https://mirrors.chenby.cn/https://github.com'
34 | kernel_url="http://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-${kernel_version}-1.el7.elrepo.x86_64.rpm"
35 | runc_url="${base_url}/opencontainers/runc/releases/download/v${runc_version}/runc.amd64"
36 | docker_url="https://download.docker.com/linux/static/stable/x86_64/docker-${docker_version}.tgz"
37 | cni_plugins_url="${base_url}/containernetworking/plugins/releases/download/${cni_plugins_version}/cni-plugins-linux-amd64-${cni_plugins_version}.tgz"
38 | cri_containerd_cni_url="${base_url}/containerd/containerd/releases/download/v${cri_containerd_cni_version}/cri-containerd-cni-${cri_containerd_cni_version}-linux-amd64.tar.gz"
39 | crictl_url="${base_url}/kubernetes-sigs/cri-tools/releases/download/${crictl_version}/crictl-${crictl_version}-linux-amd64.tar.gz"
40 | cri_dockerd_url="${base_url}/Mirantis/cri-dockerd/releases/download/v${cri_dockerd_version}/cri-dockerd-${cri_dockerd_version}.amd64.tgz"
41 | etcd_url="${base_url}/etcd-io/etcd/releases/download/${etcd_version}/etcd-${etcd_version}-linux-amd64.tar.gz"
42 | cfssl_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssl_${cfssl_version}_linux_amd64"
43 | cfssljson_url="${base_url}/cloudflare/cfssl/releases/download/v${cfssl_version}/cfssljson_${cfssl_version}_linux_amd64"
44 | helm_url="https://mirrors.huaweicloud.com/helm/v${helm_version}/helm-v${helm_version}-linux-amd64.tar.gz"
45 | kubernetes_server_url="https://storage.googleapis.com/kubernetes-release/release/v${kubernetes_server_version}/kubernetes-server-linux-amd64.tar.gz"
46 | nginx_url="http://nginx.org/download/nginx-${nginx_version}.tar.gz"
47 |
48 | # Download every package sequentially; -C - resumes partial downloads, -L follows redirects.
48 | # NOTE(review): -k disables TLS certificate verification (needed for the mirror proxy) — reconsider for direct HTTPS sources.
49 | packages=(
50 | "$kernel_url"
51 | "$runc_url"
52 | "$docker_url"
53 | "$cni_plugins_url"
54 | "$cri_containerd_cni_url"
55 | "$crictl_url"
56 | "$cri_dockerd_url"
57 | "$etcd_url"
58 | "$cfssl_url"
59 | "$cfssljson_url"
60 | "$helm_url"
61 | "$kubernetes_server_url"
62 | "$nginx_url"
63 | )
64 | 
65 | for package_url in "${packages[@]}"; do
66 | filename=$(basename "$package_url")
67 | if curl -k -L -C - -o "$filename" "$package_url"; then
68 | echo "Downloaded $filename"
69 | else
70 | echo "Failed to download $filename"
71 | exit 1
72 | fi
73 | done
--------------------------------------------------------------------------------
/shell/update_k8s.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ###
4 | # 作者:陈步云
5 | # 微信:15648907522
6 | # 更新k8s集群,目前脚本支持小版本之间的更新
7 | #
8 | #
9 | # 注意!!!!
10 | # 更新时候服务会重启
11 |
12 | # 升级小版本
13 | export k8s='1.27.4'
14 |
15 | # 服务器地址
16 | export All="192.168.0.31 192.168.0.32 192.168.0.33 192.168.0.34 192.168.0.35"
17 | export Master='192.168.0.31 192.168.0.32 192.168.0.33'
18 | export Work='192.168.0.34 192.168.0.35'
19 |
20 | # 服务器的密码
21 | export SSHPASS=123123
22 |
23 |
24 | echo '开始安装免密工具'
25 |
26 | # 判断系统类型并进行安装
27 | os=$(cat /etc/os-release 2>/dev/null | grep ^ID= | awk -F= '{print $2}')
28 | if [ "$os" = "\"centos\"" ]; then
29 | yum update -y ; yum install -y sshpass
30 | fi
31 | if [ "$os" = "ubuntu" ]; then
32 | apt update -y ; apt install -y sshpass
33 | fi
34 |
35 | # Set up passwordless SSH: generate a key pair only if one is missing, then install it on every host.
36 | [ -f /root/.ssh/id_rsa ] || ssh-keygen -f /root/.ssh/id_rsa -P '' -q
37 | for HOST in ${All};do
38 | sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
39 | done
40 |
41 |
42 | echo '开始下载所需包'
43 |
44 | # 创建工作目录
45 | mkdir -p update_k8s && cd update_k8s
46 |
47 | # 下载所需版本
48 | if [ -e "kubernetes-server-linux-amd64.tar.gz" ]; then
49 | echo "文件存在"
50 | tar xf kubernetes-server-linux-amd64.tar.gz
51 | else
52 | echo "文件不存在"
53 | wget https://dl.k8s.io/v${k8s}/kubernetes-server-linux-amd64.tar.gz && tar xf kubernetes-server-linux-amd64.tar.gz
54 | fi
55 |
56 | echo '开始更新集群'
57 |
58 |
59 | # Masters: stop all control-plane + node services, copy the new binaries over the old ones, then restart.
60 | for master in ${Master}; do
61 | # Stop services first so the binaries are not busy while being replaced.
62 | ssh ${master} "systemctl stop kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy"
63 | # Distribute the freshly extracted binaries.
64 | scp kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} ${master}:/usr/local/bin/
65 | # Bring everything back up on the new version.
66 | ssh ${master} "systemctl restart kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy"
67 | done
68 | 
69 | # Workers: same procedure, but only kubelet and kube-proxy run there.
70 | for work in ${Work}; do
71 | # Stop node services before replacing binaries.
72 | ssh ${work} "systemctl stop kubelet kube-proxy"
73 | # Distribute the new kubelet and kube-proxy binaries.
74 | scp kubernetes/server/bin/kube{let,-proxy} ${work}:/usr/local/bin/
75 | # Restart node services on the new version.
76 | ssh ${work} "systemctl restart kubelet kube-proxy"
77 | done
78 |
79 | echo '更新完成,`kubectl get node`看一下结果吧!'
80 |
--------------------------------------------------------------------------------
/yaml/PHP-Nginx-Deployment-ConfMap-Service.yaml:
--------------------------------------------------------------------------------
1 | kind: Service # 对象类型
2 | apiVersion: v1 # api 版本
3 | metadata: # 元数据
4 | name: php-fpm-nginx #Service 服务名
5 | spec:
6 | type: NodePort # 类型为nodeport
7 | selector: #标签选择器
8 | app: php-fpm-nginx
9 | ports: #端口信息
10 | - port: 80 # 容器端口80
11 | protocol: TCP #tcp类型
12 | targetPort: 80 # Service 将 nginx 容器的 80 端口暴露出来
13 | ---
14 | kind: ConfigMap # 对象类型
15 | apiVersion: v1 # api 版本
16 | metadata: # 元数据
17 | name: nginx-config # 对象名称
18 | data: # key-value 数据集合
19 | nginx.conf: | # 将 nginx config 配置写入 ConfigMap 中,经典的 php-fpm 代理设置,这里就不再多说了
20 | user nginx;
21 | worker_processes auto;
22 | error_log /var/log/nginx/error.log notice;
23 | pid /var/run/nginx.pid;
24 | events {
25 | worker_connections 1024;
26 | }
27 | http {
28 | include /etc/nginx/mime.types;
29 | default_type application/octet-stream;
30 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
31 | '$status $body_bytes_sent "$http_referer" '
32 | '"$http_user_agent" "$http_x_forwarded_for"';
33 | access_log /var/log/nginx/access.log main;
34 | sendfile on;
35 | keepalive_timeout 65;
36 | server {
37 | listen 80 default_server;
38 | listen [::]:80 default_server;
39 | root /var/www/html;
40 | index index.php;
41 | server_name _;
42 | if (-f $request_filename/index.html) {
43 | rewrite (.*) $1/index.html break;
44 | }
45 | if (-f $request_filename/index.php) {
46 | rewrite (.*) $1/index.php;
47 | }
48 | if (!-f $request_filename) {
49 | rewrite (.*) /index.php;
50 | }
51 | location / {
52 | try_files $uri $uri/ =404;
53 | }
54 | location ~ \.php$ {
55 | include fastcgi_params;
56 | fastcgi_param REQUEST_METHOD $request_method;
57 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
58 | fastcgi_pass 127.0.0.1:9000;
59 | }
60 | }
61 | include /etc/nginx/conf.d/*.conf;
62 | }
63 |
64 | ---
65 | kind: Deployment # 对象类型
66 | apiVersion: apps/v1 # api 版本
67 | metadata: # 元数据
68 | name: php-fpm-nginx # Deployment 对象名称
69 | spec: # Deployment 对象规约
70 | selector: # 选择器
71 | matchLabels: # 标签匹配
72 | app: php-fpm-nginx
73 | replicas: 3 # 副本数量
74 | template: # 模版
75 | metadata: # Pod 对象的元数据
76 | labels: # Pod 对象的标签
77 | app: php-fpm-nginx
78 | spec: # Pod 对象规约
79 | containers: # 这里设置了两个容器
80 | - name: php-fpm # 第一个容器名称
81 | image: php:7.4.29-fpm # 容器镜像
82 | imagePullPolicy: IfNotPresent #镜像拉取策略
83 | livenessProbe: # 存活探测
84 | initialDelaySeconds: 5 # 容器启动后要等待多少秒后才启动存活和就绪探测器
85 | periodSeconds: 10 # 每多少秒执行一次存活探测
86 | tcpSocket: # 监测tcp端口
87 | port: 9000 #监测端口
88 | readinessProbe: # 就绪探测
89 | initialDelaySeconds: 5 # 容器启动后要等待多少秒后才启动存活和就绪探测器
90 | periodSeconds: 10 # 每多少秒执行一次存活探测
91 | tcpSocket: # 监测tcp端口
92 | port: 9000 #监测端口
93 | resources: # 资源约束
94 | requests: # 最小限制
95 | memory: "64Mi" # 内存最新64M
96 | cpu: "250m" # CPU最大使用0.25核
97 | limits: # 最大限制
98 | memory: "128Mi" # 内存最新128M
99 | cpu: "500m" # CPU最大使用0.5核
100 | ports:
101 | - containerPort: 9000 # php-fpm 端口
102 | volumeMounts: # 挂载数据卷
103 | - mountPath: /var/www/html # 挂载两个容器共享的 volume
104 | name: nginx-www
105 | lifecycle: # 生命周期
106 | postStart: # 当容器处于 postStart 阶段时,执行一下命令
107 | exec:
108 | command: ["/bin/sh", "-c", "echo startup..."] # 仅输出一条启动日志,表明容器已进入 postStart 阶段
109 | preStop:
110 | exec:
111 | command:
112 | - sh
113 | - '-c'
114 | - sleep 5 && kill -SIGQUIT 1 # 优雅退出
115 | - name: nginx # 第二个容器名称
116 | image: nginx # 容器镜像
117 | imagePullPolicy: IfNotPresent
118 | livenessProbe: # 存活探测
119 | initialDelaySeconds: 5 # 容器启动后要等待多少秒后才启动存活和就绪探测器
120 | periodSeconds: 10 # 每多少秒执行一次存活探测
121 | httpGet: # 以httpGet方式进行探测
122 | path: / # 探测路径
123 | port: 80 # 探测端口
124 | readinessProbe: # 就绪探测
125 | initialDelaySeconds: 5 # 容器启动后要等待多少秒后才启动存活和就绪探测器
126 | periodSeconds: 10 # 每多少秒执行一次存活探测
127 | httpGet: # 以httpGet方式进行探测
128 | path: / # 探测路径
129 | port: 80 # 探测端口
130 | resources: # 资源约束
131 | requests: # 最小限制
132 | memory: "64Mi" # 内存最新64M
133 | cpu: "250m" # CPU最大使用0.25核
134 | limits: # 最大限制
135 | memory: "128Mi" # 内存最新128M
136 | cpu: "500m" # CPU最大使用0.5核
137 | ports:
138 | - containerPort: 80 # nginx 端口
139 | volumeMounts: # nginx 容器挂载了两个 volume,一个是与 php-fpm 容器共享的 volume,另外一个是配置了 nginx.conf 的 volume
140 | - mountPath: /var/www/html # 挂载两个容器共享的 volume
141 | name: nginx-www
142 | - mountPath: /etc/nginx/nginx.conf # 挂载配置了 nginx.conf 的 volume
143 | subPath: nginx.conf
144 | name: nginx-config
145 | lifecycle:
146 | preStop:
147 | exec:
148 | command:
149 | - sh
150 | - '-c'
151 | - sleep 5 && /usr/sbin/nginx -s quit # 优雅退出
152 | volumes:
153 | - name: nginx-www # 网站文件通过nfs挂载
154 | nfs:
155 | path: /html/
156 | server: 192.168.1.123
157 | - name: nginx-config
158 | configMap: # configMap
159 | name: nginx-config
160 |
--------------------------------------------------------------------------------
/yaml/admin.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: admin-user
5 |
6 | namespace: kube-system
7 | ---
8 |
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRoleBinding
11 | metadata:
12 | name: admin-user
13 | annotations:
14 | rbac.authorization.kubernetes.io/autoupdate: "true"
15 | roleRef:
16 | apiGroup: rbac.authorization.k8s.io
17 | kind: ClusterRole
18 | name: cluster-admin
19 | subjects:
20 |
21 | - kind: ServiceAccount
22 | name: admin-user
23 | namespace: kube-system
24 |
25 |
--------------------------------------------------------------------------------
/yaml/cby.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: chenby
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | app: chenby
10 | template:
11 | metadata:
12 | labels:
13 | app: chenby
14 | spec:
15 | containers:
16 | - name: chenby
17 | image: nginx
18 | resources:
19 | limits:
20 | memory: "128Mi"
21 | cpu: "500m"
22 | ports:
23 | - containerPort: 80
24 |
25 | ---
26 | apiVersion: v1
27 | kind: Service
28 | metadata:
29 | name: chenby
30 | spec:
31 | ipFamilyPolicy: PreferDualStack
32 | ipFamilies:
33 | - IPv6
34 | - IPv4
35 | type: NodePort
36 | selector:
37 | app: chenby
38 | ports:
39 | - port: 80
40 | targetPort: 80
41 |
--------------------------------------------------------------------------------
/yaml/connectivity-check.yaml:
--------------------------------------------------------------------------------
1 | # Automatically generated by Makefile. DO NOT EDIT
2 | ---
3 | metadata:
4 | name: echo-a
5 | labels:
6 | name: echo-a
7 | topology: any
8 | component: network-check
9 | traffic: internal
10 | quarantine: "false"
11 | type: autocheck
12 | spec:
13 | template:
14 | metadata:
15 | labels:
16 | name: echo-a
17 | spec:
18 | hostNetwork: false
19 | containers:
20 | - name: echo-a-container
21 | env:
22 | - name: PORT
23 | value: "8080"
24 | ports:
25 | - containerPort: 8080
26 | image: quay.io/cilium/json-mock:v1.3.2@sha256:bc6c46c74efadb135bc996c2467cece6989302371ef4e3f068361460abaf39be
27 | imagePullPolicy: IfNotPresent
28 | readinessProbe:
29 | timeoutSeconds: 7
30 | exec:
31 | command:
32 | - curl
33 | - -sS
34 | - --fail
35 | - --connect-timeout
36 | - "5"
37 | - -o
38 | - /dev/null
39 | - localhost:8080
40 | livenessProbe:
41 | timeoutSeconds: 7
42 | exec:
43 | command:
44 | - curl
45 | - -sS
46 | - --fail
47 | - --connect-timeout
48 | - "5"
49 | - -o
50 | - /dev/null
51 | - localhost:8080
52 | selector:
53 | matchLabels:
54 | name: echo-a
55 | replicas: 1
56 | apiVersion: apps/v1
57 | kind: Deployment
58 | ---
59 | metadata:
60 | name: echo-b
61 | labels:
62 | name: echo-b
63 | topology: any
64 | component: services-check
65 | traffic: internal
66 | quarantine: "false"
67 | type: autocheck
68 | spec:
69 | template:
70 | metadata:
71 | labels:
72 | name: echo-b
73 | spec:
74 | hostNetwork: false
75 | containers:
76 | - name: echo-b-container
77 | env:
78 | - name: PORT
79 | value: "8080"
80 | ports:
81 | - containerPort: 8080
82 | hostPort: 40000
83 | image: quay.io/cilium/json-mock:v1.3.2@sha256:bc6c46c74efadb135bc996c2467cece6989302371ef4e3f068361460abaf39be
84 | imagePullPolicy: IfNotPresent
85 | readinessProbe:
86 | timeoutSeconds: 7
87 | exec:
88 | command:
89 | - curl
90 | - -sS
91 | - --fail
92 | - --connect-timeout
93 | - "5"
94 | - -o
95 | - /dev/null
96 | - localhost:8080
97 | livenessProbe:
98 | timeoutSeconds: 7
99 | exec:
100 | command:
101 | - curl
102 | - -sS
103 | - --fail
104 | - --connect-timeout
105 | - "5"
106 | - -o
107 | - /dev/null
108 | - localhost:8080
109 | selector:
110 | matchLabels:
111 | name: echo-b
112 | replicas: 1
113 | apiVersion: apps/v1
114 | kind: Deployment
115 | ---
116 | metadata:
117 | name: echo-b-host
118 | labels:
119 | name: echo-b-host
120 | topology: any
121 | component: services-check
122 | traffic: internal
123 | quarantine: "false"
124 | type: autocheck
125 | spec:
126 | template:
127 | metadata:
128 | labels:
129 | name: echo-b-host
130 | spec:
131 | hostNetwork: true
132 | containers:
133 | - name: echo-b-host-container
134 | env:
135 | - name: PORT
136 | value: "31000"
137 | ports: []
138 | image: quay.io/cilium/json-mock:v1.3.2@sha256:bc6c46c74efadb135bc996c2467cece6989302371ef4e3f068361460abaf39be
139 | imagePullPolicy: IfNotPresent
140 | readinessProbe:
141 | timeoutSeconds: 7
142 | exec:
143 | command:
144 | - curl
145 | - -sS
146 | - --fail
147 | - --connect-timeout
148 | - "5"
149 | - -o
150 | - /dev/null
151 | - localhost:31000
152 | livenessProbe:
153 | timeoutSeconds: 7
154 | exec:
155 | command:
156 | - curl
157 | - -sS
158 | - --fail
159 | - --connect-timeout
160 | - "5"
161 | - -o
162 | - /dev/null
163 | - localhost:31000
164 | affinity:
165 | podAffinity:
166 | requiredDuringSchedulingIgnoredDuringExecution:
167 | - labelSelector:
168 | matchExpressions:
169 | - key: name
170 | operator: In
171 | values:
172 | - echo-b
173 | topologyKey: kubernetes.io/hostname
174 | selector:
175 | matchLabels:
176 | name: echo-b-host
177 | replicas: 1
178 | apiVersion: apps/v1
179 | kind: Deployment
180 | ---
181 | metadata:
182 | name: pod-to-a
183 | labels:
184 | name: pod-to-a
185 | topology: any
186 | component: network-check
187 | traffic: internal
188 | quarantine: "false"
189 | type: autocheck
190 | spec:
191 | template:
192 | metadata:
193 | labels:
194 | name: pod-to-a
195 | spec:
196 | hostNetwork: false
197 | containers:
198 | - name: pod-to-a-container
199 | ports: []
200 | image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
201 | imagePullPolicy: IfNotPresent
202 | command:
203 | - /bin/ash
204 | - -c
205 | - sleep 1000000000
206 | readinessProbe:
207 | timeoutSeconds: 7
208 | exec:
209 | command:
210 | - curl
211 | - -sS
212 | - --fail
213 | - --connect-timeout
214 | - "5"
215 | - -o
216 | - /dev/null
217 | - echo-a:8080/public
218 | livenessProbe:
219 | timeoutSeconds: 7
220 | exec:
221 | command:
222 | - curl
223 | - -sS
224 | - --fail
225 | - --connect-timeout
226 | - "5"
227 | - -o
228 | - /dev/null
229 | - echo-a:8080/public
230 | selector:
231 | matchLabels:
232 | name: pod-to-a
233 | replicas: 1
234 | apiVersion: apps/v1
235 | kind: Deployment
236 | ---
237 | metadata:
238 | name: pod-to-external-1111
239 | labels:
240 | name: pod-to-external-1111
241 | topology: any
242 | component: network-check
243 | traffic: external
244 | quarantine: "false"
245 | type: autocheck
246 | spec:
247 | template:
248 | metadata:
249 | labels:
250 | name: pod-to-external-1111
251 | spec:
252 | hostNetwork: false
253 | containers:
254 | - name: pod-to-external-1111-container
255 | ports: []
256 | image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
257 | imagePullPolicy: IfNotPresent
258 | command:
259 | - /bin/ash
260 | - -c
261 | - sleep 1000000000
262 | readinessProbe:
263 | timeoutSeconds: 7
264 | exec:
265 | command:
266 | - curl
267 | - -sS
268 | - --fail
269 | - --connect-timeout
270 | - "5"
271 | - -o
272 | - /dev/null
273 | - https://1.1.1.1
274 | livenessProbe:
275 | timeoutSeconds: 7
276 | exec:
277 | command:
278 | - curl
279 | - -sS
280 | - --fail
281 | - --connect-timeout
282 | - "5"
283 | - -o
284 | - /dev/null
285 | - https://1.1.1.1
286 | selector:
287 | matchLabels:
288 | name: pod-to-external-1111
289 | replicas: 1
290 | apiVersion: apps/v1
291 | kind: Deployment
292 | ---
293 | metadata:
294 | name: pod-to-a-denied-cnp
295 | labels:
296 | name: pod-to-a-denied-cnp
297 | topology: any
298 | component: policy-check
299 | traffic: internal
300 | quarantine: "false"
301 | type: autocheck
302 | spec:
303 | template:
304 | metadata:
305 | labels:
306 | name: pod-to-a-denied-cnp
307 | spec:
308 | hostNetwork: false
309 | containers:
310 | - name: pod-to-a-denied-cnp-container
311 | ports: []
312 | image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
313 | imagePullPolicy: IfNotPresent
314 | command:
315 | - /bin/ash
316 | - -c
317 | - sleep 1000000000
318 | readinessProbe:
319 | timeoutSeconds: 7
320 | exec:
321 | command:
322 | - ash
323 | - -c
324 | - '! curl -s --fail --connect-timeout 5 -o /dev/null echo-a:8080/private'
325 | livenessProbe:
326 | timeoutSeconds: 7
327 | exec:
328 | command:
329 | - ash
330 | - -c
331 | - '! curl -s --fail --connect-timeout 5 -o /dev/null echo-a:8080/private'
332 | selector:
333 | matchLabels:
334 | name: pod-to-a-denied-cnp
335 | replicas: 1
336 | apiVersion: apps/v1
337 | kind: Deployment
338 | ---
339 | metadata:
340 | name: pod-to-a-allowed-cnp
341 | labels:
342 | name: pod-to-a-allowed-cnp
343 | topology: any
344 | component: policy-check
345 | traffic: internal
346 | quarantine: "false"
347 | type: autocheck
348 | spec:
349 | template:
350 | metadata:
351 | labels:
352 | name: pod-to-a-allowed-cnp
353 | spec:
354 | hostNetwork: false
355 | containers:
356 | - name: pod-to-a-allowed-cnp-container
357 | ports: []
358 | image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
359 | imagePullPolicy: IfNotPresent
360 | command:
361 | - /bin/ash
362 | - -c
363 | - sleep 1000000000
364 | readinessProbe:
365 | timeoutSeconds: 7
366 | exec:
367 | command:
368 | - curl
369 | - -sS
370 | - --fail
371 | - --connect-timeout
372 | - "5"
373 | - -o
374 | - /dev/null
375 | - echo-a:8080/public
376 | livenessProbe:
377 | timeoutSeconds: 7
378 | exec:
379 | command:
380 | - curl
381 | - -sS
382 | - --fail
383 | - --connect-timeout
384 | - "5"
385 | - -o
386 | - /dev/null
387 | - echo-a:8080/public
388 | selector:
389 | matchLabels:
390 | name: pod-to-a-allowed-cnp
391 | replicas: 1
392 | apiVersion: apps/v1
393 | kind: Deployment
394 | ---
395 | metadata:
396 | name: pod-to-external-fqdn-allow-google-cnp
397 | labels:
398 | name: pod-to-external-fqdn-allow-google-cnp
399 | topology: any
400 | component: policy-check
401 | traffic: external
402 | quarantine: "false"
403 | type: autocheck
404 | spec:
405 | template:
406 | metadata:
407 | labels:
408 | name: pod-to-external-fqdn-allow-google-cnp
409 | spec:
410 | hostNetwork: false
411 | containers:
412 | - name: pod-to-external-fqdn-allow-google-cnp-container
413 | ports: []
414 | image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
415 | imagePullPolicy: IfNotPresent
416 | command:
417 | - /bin/ash
418 | - -c
419 | - sleep 1000000000
420 | readinessProbe:
421 | timeoutSeconds: 7
422 | exec:
423 | command:
424 | - curl
425 | - -sS
426 | - --fail
427 | - --connect-timeout
428 | - "5"
429 | - -o
430 | - /dev/null
431 | - www.baidu.cn
432 | livenessProbe:
433 | timeoutSeconds: 7
434 | exec:
435 | command:
436 | - curl
437 | - -sS
438 | - --fail
439 | - --connect-timeout
440 | - "5"
441 | - -o
442 | - /dev/null
443 | - www.baidu.cn
444 | selector:
445 | matchLabels:
446 | name: pod-to-external-fqdn-allow-google-cnp
447 | replicas: 1
448 | apiVersion: apps/v1
449 | kind: Deployment
450 | ---
---
# Pod-to-service check via ClusterIP, forced onto a different node than echo-b.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-to-b-multi-node-clusterip
  labels:
    name: pod-to-b-multi-node-clusterip
    topology: multi-node
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  replicas: 1
  selector:
    matchLabels:
      name: pod-to-b-multi-node-clusterip
  template:
    metadata:
      labels:
        name: pod-to-b-multi-node-clusterip
    spec:
      hostNetwork: false
      containers:
        - name: pod-to-b-multi-node-clusterip-container
          image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
          imagePullPolicy: IfNotPresent
          ports: []
          command:
            - /bin/ash
            - -c
            - sleep 1000000000
          readinessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b:8080/public
          livenessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b:8080/public
      # Anti-affinity to echo-b guarantees the multi-node path.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: name
                    operator: In
                    values:
                      - echo-b
              topologyKey: kubernetes.io/hostname
---
# Pod-to-service check via headless service DNS, on a different node than echo-b.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-to-b-multi-node-headless
  labels:
    name: pod-to-b-multi-node-headless
    topology: multi-node
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  replicas: 1
  selector:
    matchLabels:
      name: pod-to-b-multi-node-headless
  template:
    metadata:
      labels:
        name: pod-to-b-multi-node-headless
    spec:
      hostNetwork: false
      containers:
        - name: pod-to-b-multi-node-headless-container
          image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
          imagePullPolicy: IfNotPresent
          ports: []
          command:
            - /bin/ash
            - -c
            - sleep 1000000000
          readinessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-headless:8080/public
          livenessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-headless:8080/public
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: name
                    operator: In
                    values:
                      - echo-b
              topologyKey: kubernetes.io/hostname
---
# Host-network pod reaching echo-b via ClusterIP from a different node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: host-to-b-multi-node-clusterip
  labels:
    name: host-to-b-multi-node-clusterip
    topology: multi-node
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  replicas: 1
  selector:
    matchLabels:
      name: host-to-b-multi-node-clusterip
  template:
    metadata:
      labels:
        name: host-to-b-multi-node-clusterip
    spec:
      hostNetwork: true
      # Required so cluster DNS still resolves service names on host network.
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: host-to-b-multi-node-clusterip-container
          image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
          imagePullPolicy: IfNotPresent
          ports: []
          command:
            - /bin/ash
            - -c
            - sleep 1000000000
          readinessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b:8080/public
          livenessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b:8080/public
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: name
                    operator: In
                    values:
                      - echo-b
              topologyKey: kubernetes.io/hostname
---
# Host-network pod reaching echo-b via headless-service DNS from a different node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: host-to-b-multi-node-headless
  labels:
    name: host-to-b-multi-node-headless
    topology: multi-node
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  replicas: 1
  selector:
    matchLabels:
      name: host-to-b-multi-node-headless
  template:
    metadata:
      labels:
        name: host-to-b-multi-node-headless
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: host-to-b-multi-node-headless-container
          image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
          imagePullPolicy: IfNotPresent
          ports: []
          command:
            - /bin/ash
            - -c
            - sleep 1000000000
          readinessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-headless:8080/public
          livenessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-headless:8080/public
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: name
                    operator: In
                    values:
                      - echo-b
              topologyKey: kubernetes.io/hostname
---
# NodePort reachability from a pod on a *different* node than echo-b.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-to-b-multi-node-nodeport
  labels:
    name: pod-to-b-multi-node-nodeport
    topology: multi-node
    component: nodeport-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  replicas: 1
  selector:
    matchLabels:
      name: pod-to-b-multi-node-nodeport
  template:
    metadata:
      labels:
        name: pod-to-b-multi-node-nodeport
    spec:
      hostNetwork: false
      containers:
        - name: pod-to-b-multi-node-nodeport-container
          image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
          imagePullPolicy: IfNotPresent
          ports: []
          command:
            - /bin/ash
            - -c
            - sleep 1000000000
          readinessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-host-headless:31414/public
          livenessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-host-headless:31414/public
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: name
                    operator: In
                    values:
                      - echo-b
              topologyKey: kubernetes.io/hostname
---
# NodePort reachability from a pod on the *same* node as echo-b
# (podAffinity, in contrast to the anti-affinity used above).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-to-b-intra-node-nodeport
  labels:
    name: pod-to-b-intra-node-nodeport
    topology: intra-node
    component: nodeport-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  replicas: 1
  selector:
    matchLabels:
      name: pod-to-b-intra-node-nodeport
  template:
    metadata:
      labels:
        name: pod-to-b-intra-node-nodeport
    spec:
      hostNetwork: false
      containers:
        - name: pod-to-b-intra-node-nodeport-container
          image: quay.io/cilium/alpine-curl:v1.5.0@sha256:7b286939730d8af1149ef88dba15739d8330bb83d7d9853a23e5ab4043e2d33c
          imagePullPolicy: IfNotPresent
          ports: []
          command:
            - /bin/ash
            - -c
            - sleep 1000000000
          readinessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-host-headless:31414/public
          livenessProbe:
            timeoutSeconds: 7
            exec:
              command:
                - curl
                - -sS
                - --fail
                - --connect-timeout
                - "5"
                - -o
                - /dev/null
                - echo-b-host-headless:31414/public
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: name
                    operator: In
                    values:
                      - echo-b
              topologyKey: kubernetes.io/hostname
848 | ---
---
# ClusterIP service fronting the echo-a pods.
apiVersion: v1
kind: Service
metadata:
  name: echo-a
  labels:
    name: echo-a
    topology: any
    component: network-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  type: ClusterIP
  selector:
    name: echo-a
  ports:
    - name: http
      port: 8080
---
# NodePort service fronting echo-b; fixed nodePort 31414 is what the
# *-nodeport check deployments curl.
apiVersion: v1
kind: Service
metadata:
  name: echo-b
  labels:
    name: echo-b
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  type: NodePort
  selector:
    name: echo-b
  ports:
    - name: http
      port: 8080
      nodePort: 31414
---
# Headless variant of echo-b (clusterIP: None) for DNS-based checks.
apiVersion: v1
kind: Service
metadata:
  name: echo-b-headless
  labels:
    name: echo-b-headless
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    name: echo-b
  ports:
    - name: http
      port: 8080
---
# Headless, port-less service resolving to the echo-b-host pods' node IPs;
# used by the nodeport checks to find node addresses.
apiVersion: v1
kind: Service
metadata:
  name: echo-b-host-headless
  labels:
    name: echo-b-host-headless
    topology: any
    component: services-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    name: echo-b-host
  ports: []
925 | ---
---
# Egress policy for pod-to-a-denied-cnp: only DNS is allowed (no rule for
# echo-a), so the check pod's traffic to echo-a is expected to be dropped.
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: pod-to-a-denied-cnp
  labels:
    name: pod-to-a-denied-cnp
    topology: any
    component: policy-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  endpointSelector:
    matchLabels:
      name: pod-to-a-denied-cnp
  egress:
    # DNS to kube-dns / node-local-dns.
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s:k8s-app: kube-dns
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s:k8s-app: node-local-dns
      toPorts:
        - ports:
            - port: "53"
              protocol: ANY
    # DNS on OpenShift clusters.
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: openshift-dns
            k8s:dns.operator.openshift.io/daemonset-dns: default
      toPorts:
        - ports:
            - port: "5353"
              protocol: UDP
---
# Egress policy for pod-to-a-allowed-cnp: DNS plus TCP/8080 to echo-a,
# so that check pod's probe is expected to succeed.
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: pod-to-a-allowed-cnp
  labels:
    name: pod-to-a-allowed-cnp
    topology: any
    component: policy-check
    traffic: internal
    quarantine: "false"
    type: autocheck
spec:
  endpointSelector:
    matchLabels:
      name: pod-to-a-allowed-cnp
  egress:
    # Application traffic to echo-a.
    - toEndpoints:
        - matchLabels:
            name: echo-a
      toPorts:
        - ports:
            - port: "8080"
              protocol: TCP
    # DNS to kube-dns / node-local-dns.
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s:k8s-app: kube-dns
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s:k8s-app: node-local-dns
      toPorts:
        - ports:
            - port: "53"
              protocol: ANY
    # DNS on OpenShift clusters.
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: openshift-dns
            k8s:dns.operator.openshift.io/daemonset-dns: default
      toPorts:
        - ports:
            - port: "5353"
              protocol: UDP
1004 | ---
---
# FQDN egress policy: allows the matching check pod to reach *.baidu.cn only,
# with DNS-inspecting rules so Cilium can learn the resolved IPs.
# NOTE(review): name still says "google" although the pattern targets baidu;
# the label must stay in sync with the Deployment of the same name.
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: pod-to-external-fqdn-allow-google-cnp
  labels:
    name: pod-to-external-fqdn-allow-google-cnp
    topology: any
    component: policy-check
    traffic: external
    quarantine: "false"
    type: autocheck
spec:
  endpointSelector:
    matchLabels:
      name: pod-to-external-fqdn-allow-google-cnp
  egress:
    - toFQDNs:
        - matchPattern: "*.baidu.cn"
    # DNS to kube-dns / node-local-dns, with DNS-proxy visibility.
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s:k8s-app: kube-dns
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s:k8s-app: node-local-dns
      toPorts:
        - ports:
            - port: "53"
              protocol: ANY
          rules:
            dns:
              - matchPattern: "*"
    # DNS on OpenShift clusters, with DNS-proxy visibility.
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: openshift-dns
            k8s:dns.operator.openshift.io/daemonset-dns: default
      toPorts:
        - ports:
            - port: "5353"
              protocol: UDP
          rules:
            dns:
              - matchPattern: "*"
1048 |
1049 |
--------------------------------------------------------------------------------
/yaml/dashboard-user.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
# Binds the dashboard service account to the built-in cluster-admin role.
# NOTE(review): this grants full cluster access to anyone holding the
# admin-user token — intended for lab/demo use.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard
19 |
--------------------------------------------------------------------------------
/yaml/deploy.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
data:
  # ConfigMap values must be strings, hence the quoted boolean.
  allow-snippet-annotations: 'true'
41 | ---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups: ['']
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups: ['']
    resources:
      - nodes
    verbs:
      - get
  - apiGroups: ['']
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups: ['']
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
129 | ---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
rules:
  - apiGroups: ['']
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups: ['']
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups: ['']
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  # Leader-election ConfigMap: get/update restricted to the named object.
  - apiGroups: ['']
    resources:
      - configmaps
    resourceNames:
      - ingress-controller-leader
    verbs:
      - get
      - update
  - apiGroups: ['']
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups: ['']
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
235 | ---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
spec:
  type: ClusterIP
  ports:
    # Fronts the controller's "webhook" container port (8443).
    - name: https-webhook
      port: 443
      targetPort: webhook
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
260 | ---
# Source: ingress-nginx/templates/controller-service.yaml
# Fix: the original carried a bare `annotations:` key with no value (parses as
# null — yamllint empty-values); removed since no annotations are set.
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
spec:
  type: NodePort
  # Local preserves client source IPs; traffic only reaches nodes that run a
  # controller pod.
  externalTrafficPolicy: Local
  ipFamilyPolicy: SingleStack
  ipFamilies:
    - IPv4
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
      appProtocol: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
296 | ---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
spec:
  revisionHistoryLimit: 10
  minReadySeconds: 0
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: controller
          # Mirror of k8s.gcr.io/ingress-nginx/controller on an Aliyun registry.
          image: registry.cn-hangzhou.aliyuncs.com/chenby/controller:v1.2.0
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-controller-leader
            - --controller-class=k8s.io/ingress-nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
          securityContext:
            capabilities:
              drop:
                - ALL
              # Needed to bind ports 80/443 while running as non-root (UID 101).
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 1
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 1
            successThreshold: 1
            failureThreshold: 3
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
408 | ---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
# Fix: removed `namespace:` from metadata — IngressClass is a cluster-scoped
# resource, so a namespace field is meaningless there.
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: nginx
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
spec:
  controller: k8s.io/ingress-nginx
426 | ---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: ingress-nginx-admission
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
webhooks:
  - name: validate.nginx.ingress.kubernetes.io
    matchPolicy: Equivalent
    # Fail closed: an unreachable webhook blocks Ingress create/update.
    failurePolicy: Fail
    sideEffects: None
    admissionReviewVersions:
      - v1
    rules:
      - apiGroups:
          - networking.k8s.io
        apiVersions:
          - v1
        operations:
          - CREATE
          - UPDATE
        resources:
          - ingresses
    clientConfig:
      service:
        namespace: ingress-nginx
        name: ingress-nginx-controller-admission
        path: /networking/v1/ingresses
463 | ---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  # The patch Job rewrites the ValidatingWebhookConfiguration's caBundle.
  - apiGroups:
      - admissionregistration.k8s.io
    resources:
      - validatingwebhookconfigurations
    verbs:
      - get
      - update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ingress-nginx-admission
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
rules:
  # The create Job stores the generated certificate in a Secret.
  - apiGroups: ['']
    resources:
      - secrets
    verbs:
      - get
      - create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ingress-nginx-admission
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
  - kind: ServiceAccount
    name: ingress-nginx-admission
    namespace: ingress-nginx
578 | ---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
# Fix: image tag aligned to v1.1.1 to match the admission-patch Job below and
# the kube-webhook-certgen version shipped with chart 4.0.10 — the two jobs
# previously pulled different certgen versions (v1.2.0 vs v1.1.1).
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-4.0.10
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 1.1.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
      containers:
        - name: create
          image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.1.1
          imagePullPolicy: IfNotPresent
          # Generates a self-signed cert for the admission service and stores
          # it in the ingress-nginx-admission Secret.
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
630 | ---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
# One-shot Job: patches the ValidatingWebhookConfiguration
# "ingress-nginx-admission" with the CA bundle from the Secret produced by the
# companion "ingress-nginx-admission-create" Job.
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
  annotations:
    # Helm hook metadata; inert when this manifest is applied with kubectl.
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.10
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.1.0
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-4.0.10
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 1.1.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          # v1.2.0 to match the "create" Job: both certgen Jobs must run the
          # same image version so the CA bundle patched into the webhook
          # corresponds to the secret that was generated (was v1.1.1).
          image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.2.0
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            # Fail closed: reject Ingress objects if the webhook is down.
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          securityContext:
            allowPrivilegeEscalation: false
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
684 |
--------------------------------------------------------------------------------
/yaml/metrics-server-components.yaml:
--------------------------------------------------------------------------------
# ServiceAccount under which the metrics-server Deployment runs; the RBAC
# bindings in the following documents attach to it.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
8 | ---
# Aggregated into the built-in admin/edit/view ClusterRoles (via the
# aggregate-to-* labels) so ordinary users can read metrics.k8s.io
# resources, e.g. `kubectl top`.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
  - apiGroups:
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
28 | ---
# Permissions metrics-server itself needs: scrape kubelet resource metrics
# (nodes/metrics) and list the pods/nodes it reports on.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/metrics
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
51 | ---
# Lets metrics-server read the extension-apiserver-authentication ConfigMap
# in kube-system (requestheader / client-CA settings needed to authenticate
# requests proxied by the aggregator).
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
67 | ---
# Allows metrics-server to delegate authentication/authorization decisions
# (TokenReview / SubjectAccessReview) to the main API server.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
82 | ---
# Grants the scrape permissions defined in the system:metrics-server
# ClusterRole above to the metrics-server ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
97 | ---
# ClusterIP Service fronting the metrics-server pods; the APIService at the
# end of this file routes metrics.k8s.io traffic here.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
      # Resolves to the container port named "https" (4443) in the Deployment.
      targetPort: https
  selector:
    k8s-app: metrics-server
113 | ---
# metrics-server Deployment: scrapes kubelet resource metrics and serves
# them through the metrics.k8s.io aggregated API.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      # Never take the only replica down before its replacement is ready.
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - args:
            # Self-signed serving cert is generated under the writable /tmp
            # emptyDir (root filesystem is read-only).
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
            - --metric-resolution=15s
            # NOTE(review): disables verification of kubelet serving certs.
            # Needed when kubelets use self-signed certs (as in this binary
            # install); remove if kubelet certs are signed by the cluster CA.
            - --kubelet-insecure-tls
            # Trust requests proxied by the aggregator via the front-proxy CA
            # mounted from the host below.
            - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
            - --requestheader-username-headers=X-Remote-User
            - --requestheader-group-headers=X-Remote-Group
            - --requestheader-extra-headers-prefix=X-Remote-Extra-
          image: m.daocloud.io/registry.k8s.io/metrics-server/metrics-server:v0.6.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
            - containerPort: 4443
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            initialDelaySeconds: 20
            periodSeconds: 10
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
            - name: ca-ssl
              mountPath: /etc/kubernetes/pki
              # Host CA material is only ever read; mount it read-only.
              readOnly: true
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
        - emptyDir: {}
          name: tmp-dir
        # Host directory holding the cluster PKI (front-proxy CA) on nodes
        # provisioned by this repo's binary install.
        - name: ca-ssl
          hostPath:
            path: /etc/kubernetes/pki
191 | ---
# Registers metrics-server as the backend for the metrics.k8s.io/v1beta1
# aggregated API.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  # metrics-server serves a self-signed cert (generated in --cert-dir), so
  # the aggregator skips TLS verification here; supplying a caBundle instead
  # would harden this.
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
207 |
--------------------------------------------------------------------------------
/yaml/mysql-ha-read-write-separation.yaml:
--------------------------------------------------------------------------------
# Per-role MySQL configuration. The StatefulSet's init container copies
# master.cnf to pod ordinal 0 and slave.cnf to all other ordinals.
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  # Master: enable the binary log so replicas can replicate from it.
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
  # Replicas: reject writes even from users with SUPER privilege.
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
16 | ---
17 | apiVersion: v1
18 | kind: Service
19 | metadata:
20 | name: mysql
21 | labels:
22 | app: mysql
23 | spec:
24 | ports:
25 | - name: mysql
26 | port: 3306
27 | clusterIP: None
28 | selector:
29 | app: mysql
30 | ---
31 | # Client service for connecting to any MySQL instance for reads.
32 | # For writes, you must instead connect to the master: mysql-0.mysql.
33 | apiVersion: v1
34 | kind: Service
35 | metadata:
36 | name: mysql-read
37 | labels:
38 | app: mysql
39 | spec:
40 | ports:
41 | - name: mysql
42 | port: 3306
43 | selector:
44 | app: mysql
45 | ---
46 | apiVersion: apps/v1
47 | kind: StatefulSet
48 | metadata:
49 | name: mysql
50 | spec:
51 | selector:
52 | matchLabels:
53 | app: mysql
54 | serviceName: mysql
55 | replicas: 3
56 | template:
57 | metadata:
58 | labels:
59 | app: mysql
60 | spec:
61 | # 设置初始化容器,进行一些准备工作
62 | initContainers:
63 | - name: init-mysql
64 | image: mysql:5.7
65 | # 为每个MySQL节点配置service-id
66 | # 如果节点序号是0,则使用master的配置, 其余节点使用slave的配置
67 | command:
68 | - bash
69 | - "-c"
70 | - |
71 | set -ex
72 | # 基于 Pod 序号生成 MySQL 服务器的 ID。
73 | [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
74 | ordinal=${BASH_REMATCH[1]}
75 | echo [mysqld] > /mnt/conf.d/server-id.cnf
76 | # 添加偏移量以避免使用 server-id=0 这一保留值。
77 | echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
78 | # Copy appropriate conf.d files from config-map to emptyDir.
79 | # 将合适的 conf.d 文件从 config-map 复制到 emptyDir。
80 | if [[ $ordinal -eq 0 ]]; then
81 | cp /mnt/config-map/master.cnf /mnt/conf.d/
82 | else
83 | cp /mnt/config-map/slave.cnf /mnt/conf.d/
84 | fi
85 | volumeMounts:
86 | - name: conf
87 | mountPath: /mnt/conf.d
88 | - name: config-map
89 | mountPath: /mnt/config-map
90 | - name: clone-mysql
91 | image: registry.cn-hangzhou.aliyuncs.com/chenby/xtrabackup:1.0
92 | # 为除了节点序号为0的主节点外的其它节点,备份前一个节点的数据
93 | command:
94 | - bash
95 | - "-c"
96 | - |
97 | set -ex
98 | # 如果已有数据,则跳过克隆。
99 | [[ -d /var/lib/mysql/mysql ]] && exit 0
100 | # 跳过主实例(序号索引 0)的克隆。
101 | [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
102 | ordinal=${BASH_REMATCH[1]}
103 | [[ $ordinal -eq 0 ]] && exit 0
104 | # 从原来的对等节点克隆数据。
105 | ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
106 | # 准备备份。
107 | xtrabackup --prepare --target-dir=/var/lib/mysql
108 | volumeMounts:
109 | - name: data
110 | mountPath: /var/lib/mysql
111 | subPath: mysql
112 | - name: conf
113 | mountPath: /etc/mysql/conf.d
114 | containers:
115 | - name: mysql
116 | image: mysql:5.7
117 | # 设置支持免密登录
118 | env:
119 | - name: MYSQL_ALLOW_EMPTY_PASSWORD
120 | value: "1"
121 | ports:
122 | - name: mysql
123 | containerPort: 3306
124 | volumeMounts:
125 | - name: data
126 | mountPath: /var/lib/mysql
127 | subPath: mysql
128 | - name: conf
129 | mountPath: /etc/mysql/conf.d
130 | resources:
131 | # 设置启动pod需要的资源,官方文档上需要500m cpu,1Gi memory。
132 | # 我本地测试的时候,会因为资源不足,报1 Insufficient cpu, 1 Insufficient memory错误,所以我改小了点
133 | requests:
134 | # m是千分之一的意思,100m表示需要0.1个cpu
135 | cpu: 1024m
136 | # Mi是兆的意思,需要100M 内存
137 | memory: 1Gi
138 | livenessProbe:
139 | # 使用mysqladmin ping命令,对MySQL节点进行探活检测
140 | # 在节点部署完30秒后开始,每10秒检测一次,超时时间为5秒
141 | exec:
142 | command: ["mysqladmin", "ping"]
143 | initialDelaySeconds: 30
144 | periodSeconds: 10
145 | timeoutSeconds: 5
146 | readinessProbe:
147 | # 对节点服务可用性进行检测, 启动5秒后开始,每2秒检测一次,超时时间1秒
148 | exec:
149 | # 检查我们是否可以通过 TCP 执行查询(skip-networking 是关闭的)。
150 | command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
151 | initialDelaySeconds: 5
152 | periodSeconds: 2
153 | timeoutSeconds: 1
154 | - name: xtrabackup
155 | image: registry.cn-hangzhou.aliyuncs.com/chenby/xtrabackup:1.0
156 | ports:
157 | - name: xtrabackup
158 | containerPort: 3307
159 | # 开始进行备份文件校验、解析和开始同步
160 | command:
161 | - bash
162 | - "-c"
163 | - |
164 | set -ex
165 | cd /var/lib/mysql
166 | # 确定克隆数据的 binlog 位置(如果有的话)。
167 | if [[ -f xtrabackup_slave_info && "x$( change_master_to.sql.in
171 | # 在这里要忽略 xtrabackup_binlog_info (它是没用的)。
172 | rm -f xtrabackup_slave_info xtrabackup_binlog_info
173 | elif [[ -f xtrabackup_binlog_info ]]; then
174 | # 我们直接从主实例进行克隆。解析 binlog 位置。
175 | [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
176 | rm -f xtrabackup_binlog_info xtrabackup_slave_info
177 | echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
178 | MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
179 | fi
180 | # 检查我们是否需要通过启动复制来完成克隆。
181 | if [[ -f change_master_to.sql.in ]]; then
182 | echo "Waiting for mysqld to be ready (accepting connections)"
183 | until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
184 | echo "Initializing replication from clone position"
185 | mysql -h 127.0.0.1 \
186 | -e "$(