├── README.md ├── ch02 ├── Dockerfile └── app.js ├── ch03 ├── kubia-gpu.yaml ├── kubia-manual-with-labels.yaml └── kubia-manual.yaml ├── ch04 ├── batch-job.yaml ├── batch-job │ └── Dockerfile ├── cronjob.yaml ├── kubia-liveness-probe-initial-delay.yaml ├── kubia-liveness-probe.yaml ├── kubia-rc.yaml ├── kubia-replicaset.yaml ├── kubia │ ├── Dockerfile │ └── app.js ├── ssd-monitor-daemonset.yaml └── ssd-monitor │ └── Dockerfile ├── ch05 ├── external-service-endpoints.yaml ├── external-service-externalname.yaml ├── external-service.yaml ├── kubia-ingress.yaml ├── kubia-rc-readinessprobe.yaml ├── kubia-rc.yaml ├── kubia-svc-headless.yaml ├── kubia-svc-loadbalancer.yaml ├── kubia-svc-namenode.yaml └── kubia-svc.yaml ├── ch06 ├── fortune-pod.yaml ├── fortune │ ├── Dockerfile │ └── fortuneloop.sh ├── mongodb-pod-pvc.yaml ├── mongodb-pv.yaml ├── mongodb-pvc.yaml └── storageclass-fast-hostpath.yaml ├── ch07 ├── config-files │ ├── my-nginx-config.conf │ └── sleep-interval ├── fortune-args │ ├── Dockerfile │ └── fortuneloop.sh ├── fortune-auth │ └── password.txt ├── fortune-env │ ├── Dockerfile │ └── fortuneloop.sh ├── fortune-pod-configmap-volume.yaml ├── fortune-pod-env-configmap.yaml ├── fortune-secret-pod.yaml ├── fortuneloop-args-pod.yaml ├── fortuneloop-env-pod.yaml ├── interval_val.txt └── pod-with-private-image.yaml ├── ch08 ├── curl.yaml ├── downward-api-pod.yaml └── downward-api-volume.yaml ├── ch09 ├── kubia-deployment-v3-with-readinescheck.yaml ├── kubia-deployment.yaml ├── kubia-rc-and-service-v1.yaml ├── v1 │ ├── Dockerfile │ └── app.js ├── v2 │ ├── Dockerfile │ └── app.js ├── v3 │ ├── Dockerfile │ └── app.js └── v4 │ ├── Dockerfile │ └── app.js └── ch10 ├── kubia-headless-service.yaml ├── kubia-pet-image ├── Dockerfile └── app.js └── kubia-statefulset.yaml /README.md: -------------------------------------------------------------------------------- 1 | ## 2. Get Started 2 | 3 | ### 2.1 Docker 容器 4 | 5 | ```shell 6 | > docker run busybox ls -lh # 运行标准的 unix 命令 7 | > docker run : # 运行指定版本的 image,tag 默认 latest 8 | 9 | # Dockerfile 包含构建 docker 镜像的命令 10 | FROM node # 基础镜像 11 | ADD app.js /app.js # 将本地文件添加到镜像的根目录 12 | ENTRYPOINT ["node", "app.js"] # 镜像被执行时需被执行的命令 13 | 14 | > docker build -t kubia . 
# 在当前目录根据 Dockerfile 构建指定 tag 的镜像 15 | > docker images # 列出本地所有镜像 16 | 17 | # 执行基于 kubia 镜像,映射主机 8081 到容器内 8080 端口,并在后台运行的容器 18 | > docker run --name kubia-container -p 8081:8080 -d kubia 19 | > docker ps # 列出 running 容器 20 | > docker ps -a # 列出 running, exited 容器 21 | 22 | > docker exec -it kubia-container bash # 在容器内执行 shell 命令,如 ls/sh 23 | > docker stop kubia-container # 停止容器 24 | > docker rm kubia-container # 删除容器 25 | > docker tag kubia wuyinio/kubia # 给本地镜像打标签 26 | 27 | > docker login 28 | > docker push wuyinio/kubia # push 到 DockerHub 29 | ``` 30 | 31 | ### 2.2 配置 k8s 集群 32 | 33 | ```shell 34 | > minikube start # 本地启动 minikube 单节点虚拟机 35 | > kubectl cluster-info # 查看集群各组件的 URL,是否工作正常 36 | 37 | > kubectl get nodes # get 命令可列出各种 k8s 对象的基本信息 38 | > kubectl describe node # describe 命令显示 k8s 对象更详细的信息 39 | ``` 40 | 41 | ### 2.3 在 k8s 上运行应用 42 | 43 | ```shell 44 | > kubectl run kubia --image=wuyinio/kubia --port=8080 --generator=run/v1 # 创建 rc 并拉取镜像运行 45 | > kubectl get pods # 列出 pods 46 | > kubectl expose rc kubia --type=LoadBalancer --name kubia-http # 通过 LoadBalancer 服务暴露 ClusterIP pod 服务给外部访问 47 | > kubectl get svc # 列出 services 48 | > kubectl get rc # 列出 replication controller 49 | > minikube service kubia-http # minikube 单节点不支持 LoadBalancer 服务,需手动获取服务地址 50 | # kubia-http service 的 EXTERNAL_IP 一直为 PENDING 状态 51 | 52 | > kubectl scale rc kubia --replicas=5 # 修改 rc 期望的副本数,来水平伸缩应用 53 | > kubectl get pod kubia-1ic8j -o wide # 显示 pod 详细列 54 | ``` 55 | 56 | 57 | 58 | ## 3. Pod 59 | 60 | ### 3.2 创建 Pod 61 | 62 | yaml pod 定义模块 63 | 64 | - apiVersion 与 kind 资源类型。 65 | - metadata 元数据: pod 名称、标签、注解。 66 | - spec 规格内部元件信息:容器镜像名称、卷等。 67 | 68 | kubia-manual.yaml 69 | 70 | ```yaml 71 | apiVersion: v1 72 | kind: Pod 73 | metadata: 74 | name: kubia-manual 75 | spec: 76 | containers: 77 | - image: wuyinio/kubia:2 78 | name: kubia 79 | ports: 80 | - containerPort: 8080 # pod 对外暴露的端口 81 | protocol: TCP # supported values: "SCTP", "TCP", "UDP" 82 | ``` 83 | 84 | ```shell 85 | > kubectl create -f kubia-manual.yaml # 从 yaml 文件创建 k8s 资源 86 | > kubectl get pod kubia-manual -o yaml # 导出 pod 定义 87 | > kubectl logs kubia-manual -c kubia # 查看 kubia-manual Pod 中 kubia 容器的日志,-c 显式指定容器名称 88 | > kubectl logs kubia-manual --previous # 查看崩溃前的上一个容器日志 89 | 90 | > minikube ssh && docker ps 91 | > docker logs bdb67198848d # 登录到 pod 运行时的节点 minikube,手动 docker logs 查看日志 92 | 93 | > kubectl port-forward kubia-manual 9090:8080 # 配置多重端口转发,将本机的 9090 转发至 pod 的 8080,可用于调试等 94 | port-forward kubia-manual 9090:8080 95 | Forwarding from 127.0.0.1:9090 -> 8080 96 | Forwarding from [::1]:9090 -> 8080 97 | Handling connection for 9090 # curl 127.0.0.1:9090 98 | ``` 99 | 100 | 101 | 102 | ### 3.3 标签(label) 103 | 104 | 基于组操作 pod 而非单个操作,metadata.labels 的 kv pair 标签可组织任何 k8s 资源,保存标识信息。 105 | 106 | ```yaml 107 | apiVersion: v1 108 | kind: Pod 109 | metadata: 110 | name: kubia-manual-v2 111 | labels: 112 | creation_method: manual 113 | env: prod 114 | spec: # ... 
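  # spec 其余部分与 kubia-manual 相同(见仓库 ch03/kubia-manual-with-labels.yaml),此处以注释示意:
  #   containers:
  #   - image: yinzige/kubia
  #     name: kubia-container-name
  #     ports:
  #     - containerPort: 8080
  #       protocol: TCP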
115 | ``` 116 | 117 | ```shell 118 | # 基于 Label 的增删改查操作 119 | > kubectl get pods --show-labels # 显示 labels 120 | > kubectl get pods -L env # 只显示 env 标签 121 | > kubectl label pod kubia-manual env=debug # 为指定的 pod 资源添加新标签 122 | > kubectl label pod kubia-manual env=online --overwrite=true # 修改标签 123 | > kubectl label pod kubia-manual env- # - 号删除标签 124 | ``` 125 | 126 | 127 | 128 | ### 3.5 标签选择器(nodeSelector) 129 | 130 | label selector 可筛选出具有指定值的 k8s 资源。 131 | 132 | ```shell 133 | > kubectl get pods -l env=debug # 筛选 env 为 debug 的 pods # get pod 与 get pods 无异 134 | > kubectl get pod -l creation_method!=manual # 不等 135 | > kubectl get pods -l '!env' # 不含 env 标签的 pods # -l 筛选 -L 显示 # "" 双引号会转义 136 | > kubectl get pods -l 'env in (debug)' # in 筛选 # -l 接受整体字符串为参数 137 | > kubectl get pods -l 'env notin (debug,online)' # notin 筛选 138 | ``` 139 | 140 | 在指定标签 node 上运行 pod: 141 | 142 | ```yaml 143 | apiVersion: v1 144 | kind: Pod 145 | metadata: 146 | name: kubia-gpu 147 | spec: 148 | nodeSelector: 149 | gpu: "true" # 当无可用 node 时 pod 一直处于 Pending 状态 150 | containers: #... 151 | ``` 152 | 153 | 可以使用 ` kubernetes.io/hostname: minikube` 的 nodeSelector 将 pod 运行在指定的物理机上(不建议) 154 | 155 | 156 | 157 | ### 3.6 注解(annotation) 158 | 159 | 类似 label 的 kv pair 注释,但不用作标识,用作资源说明(所以才会放在 pod metadata 的第一个子节点),可添加大量的数据块: 160 | 161 | ```shell 162 | # 增删改查和 label 操作一样 163 | > kubectl annotate pod kubia-gpu yinzige.com/gpu=10G 164 | > kubectl describe pod kubia-gpu # 出现在 metadata.annotation 165 | ``` 166 | 167 | 注:为避免标签或注解冲突,和 Java Class 使用倒序域名的方式类似,建议 key 中添加域名信息。 168 | 169 | 170 | 171 | ### 3.7 命名空间(namespace) 172 | 173 | labels 会导致资源重叠,可用 namespace 将对象分配到集群级别的隔离区域,相当于多租户的概念,以 namespace 为操作单位。 174 | 175 | ```shell 176 | > kubectl get ns # 获取所有 namespace,默认 default 下 177 | > kubectl get pods -n kube-system # 获取指定 namespace 下的资源 178 | ``` 179 | 180 | ```yaml 181 | # kubectl create namespace custom-namespace # 创建 namespace 资源 182 | apiVersion: v1 183 | kind: Namespace 184 | metadata: 185 | name: custom-namespace 186 | ``` 187 | 188 | ```yaml 189 | apiVersion: v1 190 | kind: Pod 191 | metadata: 192 | name: kubia-manual 193 | namespace: custom-namespace # 创建资源时在 metadata.namespace 中指定资源的命名空间 194 | spec: # ... 195 | ``` 196 | 197 | ```shell 198 | # 切换上下文命名空间 199 | > kubectl config set-context $(kubectl config current-context) --namespace custom-namespace 200 | ``` 201 | 202 | 203 | 204 | ### 3.8 删除 pod 205 | 206 | 删除原理:向 pod 所有容器进程定期发送 SIGTERM 信号,超时则发送 SIGKILL 强制终止。需在程序内部捕捉信号正确处理,如 Go 注册捕捉信号 `signal.Notify()` 后 select 监听该 channel 207 | 208 | ```shell 209 | > kubectl delete pod -l env=debug # 删除指定标签的 pod 210 | > kubectl delete pod --all # 删除当前 namespace 下的所有 pod (慎用) 211 | > kubectl delete all --all # 删除所有类型资源的所有对象(慎用) 212 | ``` 213 | 214 | 215 | 216 | ## 4. 
ReplicationController 217 | 218 | ### 4.1 容器存活探针(Liveness Probe) 219 | 220 | 将进程的重启监控从程序监控级别提升到 k8s 集群功能级别,使进程 OOM,死锁或死循环时能自动重启。pod 中各容器的探针用于暴露给 k8s ,来检查容器中的应用进程是否正常。分为 3 类: 221 | 222 | - HTTP Get:指定 IP:Port 和 Path,GET 请求返回 5xx 或超时则认为失败。 223 | - TCP Socket:是否能建立 TCP 连接。 224 | - Exec:在容器中执行任意命令,检查 `$?` 是否为 0 225 | 226 | ```yaml 227 | apiVersion: v1 228 | kind: Pod 229 | metadata: 230 | name: kubia-liveness 231 | spec: 232 | containers: 233 | - image: wuyinio/kubia-unhealthy 234 | name: kubia 235 | livenessProbe: 236 | httpGet: # 定义 http get 探针 237 | path: / # 指定路径和端口号 238 | port: 8080 239 | initialDelaySeconds: 11 # 初次探测延迟 11s 240 | ``` 241 | 242 | 存活探针原则: 243 | 244 | - 为检查设立子路径 `/health` ,确保无认证。 245 | - 保证探针返回失败时,错误发生在应用内且重启可恢复,而非应用外的组件导致的失败,那重启也没用。 246 | 247 | 注:非托管 Pod 仅由 Worker 节点的 kubelet 负责通过探针监控并重启,但整个节点崩溃会丢失该 Pod 248 | 249 | 250 | 251 | ### 4.2 ReplicationController 252 | 253 | RC 监控 Pod 列表并根据模板增删、迁移 Pod。分为 3 部分: 254 | 255 | - 标签选择器 label selector:确定要管理哪些 pod 256 | - 副本数量 replica count:指定要运行的 pod 数量 257 | - pod 模板 pod template:创建新的 pod 副本 258 | 259 | 注:修改标签选择器,会导致 rc 不再关注之前匹配的所有 pod。修改模板则只对新 Pod 生效(如手动 delete) 260 | 261 | RC 的两个功能: 262 | 263 | - 监控:确保符合标签选择器的 Pod 以指定的副本数量运行,多了则删除,少了则按 Pod 模板创建。 264 | - 扩缩容:能对监控的某组 Pod 进行动态修改副本数量进行扩缩容。 265 | 266 | ```yaml 267 | apiVersion: v1 268 | kind: ReplicationController 269 | metadata: 270 | name: kubia 271 | spec: 272 | replicas: 3 # pod 实例的期望数 273 | selector: # 决定 rc 的操作对象,可省略。必须与模板 label 匹配,否则 `selector` does not match template `labels` 274 | app: kubia 275 | template: 276 | metadata: 277 | labels: 278 | app: kubia 279 | spec: 280 | containers: 281 | - name: kubia 282 | image: wuyinio/kubia 283 | ports: 284 | - containerPort: 8080 285 | ``` 286 | 287 | 操作 RC: 288 | 289 | ```shell 290 | > kubectl describe rc kubia # 查看 rc 详细信息如 events 291 | > kubectl edit rc kubia # 修改 rc 的 yaml 配置 292 | > kubectl scale rc kubia --replicas=5 # 扩缩容 293 | > kubectl delete rc kubia --ascade=false # 删除 rc 时保留运行中的 pod # ascade 级联(关联删除) 294 | ``` 295 | 296 | 297 | 298 | ### 4.3 ReplicaSet 299 | 300 | ReplicaSet = ReplicationController + 扩展的 label selector ,即对 pod 的 label selector 表达力更强 。 301 | 302 | RS 能通过 `selector.matchLabels` 和 `selector.matchExpressio` 来扩展对 pod label 的筛选: 303 | 304 | ```yaml 305 | apiVersion: apps/v1 306 | kind: ReplicaSet 307 | metadata: 308 | name: kubia 309 | spec: 310 | replicas: 3 311 | selector: 312 | matchLabels: # 与 RC 一样必须完整匹配 313 | app: kubia 314 | matchExpressions: 315 | - key: app 316 | operator: In # 必须有 KEY 且 VALUE 在列表中 317 | values: 318 | - kubia 319 | - kubia-v2 320 | - key: app 321 | operator: NotIn # 有 KEY 则不能在如下列表中 322 | values: 323 | - KUBIA 324 | - key: env # 必须存在的 KEY,不能有 VALUE 325 | operator: Exists 326 | - key: ENV 327 | operator: DoesNotExist # 必须不能存在的 KEY,也不能有 VALUE 328 | template: 329 | metadata: 330 | labels: # Pod 模板的 label 必须能和 RS 的 selector 匹配上 331 | app: kubia 332 | env: env_exists 333 | spec: 334 | containers: 335 | - name: kubia 336 | image: yinzige/kubia 337 | ports: 338 | - protocol: TCP 339 | containerPort: 8080 340 | ``` 341 | 342 | 343 | 344 | ### 4.4 DaemonSet 345 | 346 | - 功能:保证标签匹配的 Pod 在符合 selector 的一个节点上运行一个,没有目标 pod 数量的概念,无法 scale 347 | - 场景:部署系统级组件,如 Node 监控,如 kube-proxy 处理各节点网络代理等。 348 | 349 | ```yaml 350 | apiVersion: apps/v1 351 | kind: DaemonSet 352 | metadata: 353 | name: ssd-monitor 354 | spec: 355 | selector: 356 | matchLabels: 357 | app: ssd-monitor # 指定要控制运行的一组 Pod 358 | template: 359 | metadata: 360 | labels: 361 | app: ssd-monitor # 被控制的 Pod 的标签 362 | spec: 363 | nodeSelector: # 选择 Pod 
要运行的节点标签 364 | disk: ssd # 注意 YAML 文件的 true 类型是布尔型,如 ssd: true 是无法被解析为 String 的 365 | containers: 366 | - name: main 367 | image: yinzige/ssd-monitor 368 | ``` 369 | 370 | 371 | 372 | ### 4.5 Job 373 | 374 | - 功能:保证任务以指定的并发数执行指定次数,任务执行失败后按配置策略处理。 375 | - 场景:执行一次性任务。 376 | 377 | ```yaml 378 | apiVersion: batch/v1 379 | kind: Job 380 | metadata: 381 | name: batch-job 382 | spec: 383 | completions: 5 # 总任务数量 384 | parallelism: 2 # 并发执行任务数 385 | template: 386 | metadata: 387 | labels: 388 | app: batch-job # 要执行的 pod job label 389 | spec: 390 | restartPolicy: OnFailure # 任务异常结束或节点异常时处理方式:"Always", "OnFailure", "Never" 391 | containers: 392 | - name: main 393 | image: yinzige/batch-job 394 | ``` 395 | 396 | 397 | 398 | ### 4.6 CronJob 399 | 400 | 对标 Linux 的 crontab 的定时任务。 401 | 402 | ```yaml 403 | apiVersion: batch/v1beta1 404 | kind: CronJob 405 | metadata: 406 | name: cron-batch-job 407 | spec: 408 | schedule: "*/1 * * * *" # 每分钟运行一次 409 | jobTemplate: 410 | spec: 411 | template: 412 | metadata: 413 | labels: 414 | app: cron-batch-job 415 | spec: 416 | restartPolicy: OnFailure 417 | containers: 418 | - name: cron-batch-job 419 | image: yinzige/batch-job 420 | ``` 421 | 422 | 423 | 424 | ## 5. Service 425 | 426 | ### 5.1 Service 内部解析 427 | 428 | #### 接入点隔离 429 | 430 | 由于 pod 调度后 IP 会变化,需使用 Service 服务给一组 Pod 提供不变的单一接入点 entrypoint,即 `IP:Port` 431 | 432 | ```shell 433 | > kubectl expose rc kubia --type=LoadBalancer --name kubia-http # 通过服务暴露 ClusterIP pod 服务给外部访问 434 | ``` 435 | 436 | 创建名为 kubia 的 Service,将其 80 端口的请求分发给有 `app: kubia` 标签 Pod 的自定义的 http 端口上: 437 | 438 | ```yaml 439 | apiVersion: v1 440 | kind: Service 441 | metadata: 442 | name: kubia 443 | spec: 444 | sessionAffinity: ClientIP 445 | ports: 446 | - name: http 447 | port: 80 448 | targetPort: http 449 | - name: https 450 | port: 443 451 | targetPort: https # defined in pod template.spec.containers.ports array 452 | selector: 453 | app: kubia 454 | ``` 455 | 456 | 设置请求亲和性:保证一个 Client 的所有请求都只会落到同一个 Pod 上: 457 | 458 | ```yaml 459 | apiVersion: v1 460 | kind: Service 461 | metadata: 462 | name: kubia-svc-session 463 | spec: 464 | sessionAffinity: ClientIP # or None default 465 | ports: 466 | - port: 80 467 | targetPort: 8080 468 | ``` 469 | 470 | 471 | 472 | #### 服务发现 473 | 474 | 客户端和 Pod 都需知道服务本身的 IP 和 Port,才能与其背后的 Pod 进行交互。 475 | 476 | - 环境变量:`kubectl exec kubia-qgtmw env` 会看到 Pod 的环境变量列出了 Pod 创建时的所有服务地址和端口,如 SVCNAME_SERVICE_HOST 和 SVCNAME_SERVICE_PORT 指向服务。 477 | 478 | - DNS 发现:Pod 上通过全限定域名 FQDN 访问服务:`..svc.cluster.local` 479 | 480 | ```shell 481 | > kubectl exec kubia-qgtmw cat /etc/resolv.conf 482 | nameserver 10.96.0.10 483 | search default.svc.cluster.local svc.cluster.local cluster.local # 会 484 | options ndots:5 485 | ``` 486 | 487 | 在 Pod kubia-qgtmw 中可通过访问 `kubia.default.svc.cluster.local` 来访问 kubia 服务,在 `/etc/resolv.conf` 中指明了域名解析服务器地址,以及主机名补全规则,是在 Pod 创建时候,根据 namespace 手动导入的。 488 | 489 | 490 | 491 | ### 5.2 Service 对内部解析外部 492 | 493 | 集群内部的 Pod 不直连到外部的 IP:Port,而是同样定义 Service 结合外部 endpoints 做代理中间层解耦。如获取百度首页的向外解析: 494 | 495 | 1.1 建立外部目标的 endpoints 资源: 496 | 497 | ```yaml 498 | apiVersion: v1 499 | kind: Endpoints 500 | metadata: 501 | name: baidu-endpoints 502 | 503 | subsets: 504 | - addresses: 505 | - ip: 220.181.38.148 # baidu.com 506 | - ip: 39.156.69.79 507 | ports: 508 | - port: 80 509 | ``` 510 | 511 | 1.2 或者建立外部解析别名 512 | 513 | ```yaml 514 | apiVersion: v1 515 | kind: Service 516 | metadata: 517 | name: baidu-endpoints 518 | spec: 519 | type: ExternalName 520 | externalName: www.baidu.com 521 | ports: 522 | - port: 80 
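  # 补充:ExternalName 类型的 Service 只在集群 DNS 中创建指向 externalName 的 CNAME 记录,不分配 ClusterIP,流量也不经过 kube-proxy 转发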
523 | ``` 524 | 525 | 2. 再建立同名的 Service 代理,使其使用上边这组 endpoints 526 | 527 | ```yaml 528 | apiVersion: v1 529 | kind: Service 530 | metadata: 531 | name: baidu-endpoints 532 | spec: 533 | ports: 534 | - port: 80 535 | ``` 536 | 537 | 3. 效果:在集群内部 Pod 上可通过名为 baidu-endpoints 的 Service 连接到百度首页: 538 | 539 | ```shell 540 | # root@kubia-72sxt:/# curl 10.103.134.52 541 | root@kubia-72sxt:/# curl baidu-endpoints 542 | 543 | 544 | 545 | ``` 546 | 547 | 注意 Service 类型: 548 | 549 | ```shell 550 | > kubectl get svc 551 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 552 | baidu-endpoints ExternalName www.baidu.com 80/TCP 30m 553 | kubernetes ClusterIP 10.96.0.1 443/TCP 5d2h 554 | kubia ClusterIP 10.96.239.1 80/TCP,443/TCP 87m 555 | ``` 556 | 557 | 558 | 559 | ### 5.3 Service 对外部解析内部 560 | 561 | #### 5.3.1 NodePort 562 | 563 | 场景:外部客户端直连宿主机端口访问服务。 564 | 565 | 原理:在集群所有节点暴露指定的端口给外部客户端,节点将该端口收到的请求转发给 Service,Service 再分发给符合 label 的 Pod,即 Service 从所有节点收集指定端口的请求并分发给能处理的 Pod 566 | 567 | 缺点:高可用性需由外部客户端保证,若节点下线需及时切换。 568 | 569 | ```yaml 570 | apiVersion: v1 571 | kind: Service 572 | metadata: 573 | name: kubia-nodeport 574 | spec: 575 | type: NodePort 576 | ports: 577 | - port: 80 578 | targetPort: 8080 579 | nodePort: 30001 580 | selector: 581 | app: kubia 582 | ``` 583 | 584 | 涉及三个端口:节点的 nodePort 30001、Service 的 port 80、Pod 的 targetPort 8080: 585 | 586 | 宿主机(即集群外部)执行 `curl MINIKUBE_NODE_IP:30001` 会被转发到有 `app:kubia` 标签的 Pod 的 8080 端口;在集群内部执行 `curl CLUSTER_IP:80` 同理,Service 的 80 端口同样转发到 Pod 的 8080 端口。 587 | 588 | 589 | 590 | #### 5.3.2 LoadBalancer 591 | 592 | 场景:外部客户端直连 LB 访问服务。它是在 k8s 集群端实现高可用的 NodePort 扩展。 593 | 594 | ```yaml 595 | apiVersion: v1 596 | kind: Service 597 | metadata: 598 | name: kubia-loadbalancer 599 | spec: 600 | type: LoadBalancer 601 | ports: 602 | - port: 80 603 | targetPort: 8080 604 | selector: 605 | app: kubia 606 | ``` 607 | 608 | k8s 在各节点上打开随机端口 **32148**,将流量进一步转发给 `app:kubia` Pod 的 8080 端口。 609 | 610 | ``` 611 | kubia-loadbalancer LoadBalancer 10.108.104.22 80:32148/TCP 4s 612 | ``` 613 | 614 | 615 | 616 | ### 5.4 Ingress 617 | 618 | 七层转发代理资源,仅通过一个 IP 即可按 host 和 path 将请求转发给后端多个 Service。需要先在集群中部署 Ingress 控制器(如 nginx ingress controller) 619 | 620 | ```yaml 621 | apiVersion: networking.k8s.io/v1beta1 622 | kind: Ingress 623 | metadata: 624 | name: kubia 625 | spec: 626 | rules: 627 | - host: "kubia.example.com" 628 | http: 629 | paths: 630 | - path: /kubia # 将 /kubia 子路径请求转发到 kubia-nodeport 服务的 80 端口 631 | backend: 632 | serviceName: kubia-nodeport 633 | servicePort: 80 634 | - path: /user # 可配置多个 path 对应到 service 635 | backend: 636 | serviceName: user-svc 637 | servicePort: 90 638 | - host: "new.example.com" # 可配置多个 host 639 | http: 640 | paths: 641 | - path: / 642 | backend: 643 | serviceName: gooele 644 | servicePort: 8080 645 | ``` 646 | 647 | 648 | 649 | ### 5.5 就绪探针 650 | 651 | 场景:pod 启动后并非立刻就绪,需延迟接收来自 service 的请求。若不定义就绪探针,pod 一启动就会暴露给 service 使用,所以需像存活探针一样添加指定类型的就绪探针: 652 | 653 | ```yaml 654 | #... 
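# 以下节选自仓库 ch05/kubia-rc-readinessprobe.yaml:容器定义位于 ReplicationController 的 Pod 模板中,此处省略 RC 外层字段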
655 | spec: 656 | containers: 657 | - name: kubia-container 658 | image: yinzige/kubia 659 | readinessProbe: 660 | exec: 661 | command: 662 | - ls 663 | - /var/ready_now 664 | ``` 665 | 666 | 667 | 668 | ### 5.6 headless 669 | 670 | 场景:向客户端暴露所有 pod 的 IP,将 ClusterIP 置为 None 即可: 671 | 672 | ```yaml 673 | apiVersion: v1 674 | kind: Service 675 | metadata: 676 | name: kubia-headless 677 | spec: 678 | clusterIP: None 679 | ports: 680 | - port: 80 681 | targetPort: 8080 682 | selector: 683 | app: kubia 684 | ``` 685 | 686 | k8s 不会为 headless 服务分配 IP,通过 DNS 可直接发现后端的所有 Pod 687 | 688 | ```shell 689 | > kubectl get svc 690 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 691 | kubia LoadBalancer 10.104.138.112 80:32110/TCP 123m 692 | kubia-headless ClusterIP None 80/TCP 10m 693 | 694 | root@dnsutils:/# nslookup kubia-headless 695 | Server: 10.96.0.10 696 | Address: 10.96.0.10#53 697 | 698 | Name: kubia-headless.default.svc.cluster.local 699 | Address: 172.17.0.6 700 | Name: kubia-headless.default.svc.cluster.local 701 | Address: 172.17.0.12 702 | Name: kubia-headless.default.svc.cluster.local 703 | Address: 172.17.0.10 704 | Name: kubia-headless.default.svc.cluster.local 705 | Address: 172.17.0.8 706 | 707 | root@dnsutils:/# nslookup kubia 708 | Server: 10.96.0.10 709 | Address: 10.96.0.10#53 710 | 711 | Name: kubia.default.svc.cluster.local 712 | Address: 10.104.138.112 713 | ``` 714 | 715 | ## Ch6. Volume 716 | 717 | ### 6.1 卷介绍 718 | 719 | - 问题:Pod 中每个容器的文件系统来自镜像,相互独立。 720 | - 解决:使用存储卷,让容器访问外部磁盘空间、容器间共享存储。 721 | 722 | 卷是 Pod 生命周期的一部分,不是 k8s 资源对象。Pod 启动时创建,删除时销毁(文件可选保留)。用于 Pod 中挂载到多个容器进行文件共享。 723 | 724 | 卷类型: 725 | 726 | - emptyDir:存放临时数据的临时空目录。 727 | - hostPath:将 k8s worker 节点的系统文件挂载到 Pod 中,常用于单节点集群的持久化存储。 728 | - persistentVolumeClaim:PVC 持久卷声明,用于预配置 PV 729 | 730 | ### 6.2 emptyDir 卷 731 | 732 | 1 个 Pod 中 2 个容器使用同一个 emptyDir 卷 html,来共享文件夹,随 Pod 删除而清除,属于非持久化存储。 733 | 734 | ```yaml 735 | apiVersion: v1 736 | kind: Pod 737 | metadata: 738 | name: fortune 739 | spec: 740 | containers: 741 | - name: html-generator 742 | image: yinzige/fortuneloop 743 | volumeMounts: 744 | - name: html 745 | mountPath: /var/htdocs # 将 html 的卷挂载到 html-generator 容器的 /var/htdocs 目录 746 | - name: web-server 747 | image: nginx:alpine 748 | volumeMounts: 749 | - name: html 750 | mountPath: /usr/share/nginx/html # 将 html 的卷挂载 web-server 容器到 /usr/share/nginx/html 目录 751 | readOnly: true # 设置只读 752 | volumes: 753 | - name: html # 声明名为 emptyDir 的 emptyDir 卷 754 | emptyDir: {} 755 | ``` 756 | 757 | emptyDir 卷跟随 Pod 被 k8s 自动分配在宿主机指定目录:`/var/lib/kubelet/pods/PODUID/volumes/kubernetes.io~empty-dir/VOLUMENAME` 758 | 759 | 如上的 html 卷位置在 minikube 节点: 760 | 761 | ```shell 762 | $ sudo ls -l /var/lib/kubelet/pods/144c55eb-edf5-4b44-a2f6-a0d9cfe04f7c/volumes/kubernetes.io~empty-dir/html 763 | total 4 764 | -rw-r--r-- 1 root root 80 Apr 26 05:01 index.html 765 | ``` 766 | 767 | 768 | 769 | ### 6.3 hostPath 卷 770 | 771 | hostPath 卷的数据不跟随 Pod 生命周期,下一个调度至此节点的 Pod 能继续使用前一个 Pod 留下的数据,pod 和节点是强耦合的,只适合单节点部署。 772 | 773 | 774 | 775 | ### 6.5 持久化卷 PV、持久化卷声明 PVC 776 | 777 | PV 与 PVC 用于解耦 Pod 与底层存储。PV、PVC 与底层存储关系: 778 | 779 | ![](http://images.yinzige.com/2019-08-21-052202.png) 780 | 781 | 流程: 782 | 783 | - 管理员向集群加入节点时准备 NFS 等存储资源(TODO ) 784 | - 管理员创建指定大小和访问模式的 PV 785 | - 用户创建需要大小的 PVC 786 | - K8S 寻找符合 PVC 的 PV 并绑定 787 | - 用户在 Pod 中通过卷引用 PVC,从而使用存储 PV 资源 788 | 789 | Admin 通过网络存储创建 PV: 790 | 791 | ```yaml 792 | apiVersion: v1 793 | kind: PersistentVolume # 创建持久卷 794 | metadata: 795 | name: mongodb-pv 796 | spec: 797 | capacity: 798 | storage: 1Gi # 告诉 
k8s 容量大小和多个客户端挂载时的访问模式 799 | accessModes: 800 | - ReadWriteOnce 801 | - ReadOnlyMany 802 | persistentVolumeReclaimPolicy: Retain # 还可取 Recycle / Delete,决定 PVC 释放后 PV 中数据的处理方式 803 | hostPath: # 持久卷绑定到本地的 hostPath 804 | path: /tmp/mongodb 805 | ``` 806 | 807 | User 通过创建 PVC 来找到大小、访问模式均匹配的 PV 并绑定: 808 | 809 | ```yaml 810 | apiVersion: v1 811 | kind: PersistentVolumeClaim 812 | metadata: 813 | name: mongodb-pvc # pvc 名称将在 pod 中引用 814 | spec: 815 | resources: 816 | requests: 817 | storage: 1Gi 818 | accessModes: 819 | - ReadWriteOnce 820 | storageClassName: "" # 显式置空:不做动态供给,只绑定到已存在的 PV;若填某个 StorageClass 名称,则等待其动态创建匹配的新 PV 821 | ``` 822 | 823 | User 创建 Pod 使用 PVC: 824 | 825 | ```yaml 826 | kind: Pod 827 | metadata: 828 | name: mongodb 829 | spec: 830 | containers: 831 | - image: mongo 832 | name: mongodb 833 | volumeMounts: 834 | - name: mongodb-data 835 | mountPath: /tmp/data 836 | ports: 837 | - containerPort: 27017 838 | protocol: TCP 839 | volumes: 840 | - name: mongodb-data 841 | persistentVolumeClaim: # pod 中通过 claimName 指定要引用的 PVC 名称 842 | claimName: mongodb-pvc 843 | ``` 844 | 845 | PV 设置卷的三种访问模式: 846 | 847 | - RWO:ReadWriteOnce:仅允许单个节点挂载读写 848 | - ROX:ReadOnlyMany :允许多个节点挂载只读 849 | - RWX:ReadWriteMany:允许多个节点挂载读写 850 | 851 | 注:PV 是集群级别的存储资源,PVC 和 Pod 是命名空间范围的。所以,在 A 命名空间的 PVC 和在 B 命名空间的 PVC 都有可能绑到同一个 PV 上。 852 | 853 | 854 | 855 | ### 6.6 动态 PV:存储类 StorageClass 856 | 857 | 场景:进一步解耦 PVC 与底层 PV,使 PVC 不依赖预先创建的具体 PV;跨集群移植时只需保证 SC 名称一致即可,不用管 PVC 和 PV。同时还能按硬盘属性等对 PV 进行分类。 858 | 859 | ```yaml 860 | apiVersion: storage.k8s.io/v1 861 | kind: StorageClass 862 | metadata: 863 | name: fast 864 | provisioner: k8s.io/minikube-hostpath # 指定 SC 收到创建 PVC 请求时应调用哪个组件进行处理并返回 PV 865 | parameters: 866 | type: pd-ssd 867 | ``` 868 | 869 | 870 | 871 | 总流程:可创建 StorageClass 存储类资源,用于分类 PV,在 PVC 中绑定到符合条件的 PV 上。 872 | 873 | ![](http://images.yinzige.com/2019-08-21-054515.png) 874 | 875 | 876 | 877 | ## 7. 配置传递:ConfigMap 与 Secret 878 | 879 | ### 7.1 配置容器化应用程序 880 | 881 | 三种配置方式: 882 | 883 | - 向容器传递命令行参数。 884 | - 为每个容器设置环境变量。 885 | - 通过卷将配置文件挂载至容器中。 886 | 887 | 容器内置配置文件的问题:修改配置需重新构建镜像,且配置文件完全公开。解决:改用 ConfigMap 或 Secret 卷挂载配置。 888 | 889 | ### 7.2 向容器传递命令行参数 890 | 891 | Dockerfile 中 `ENTRYPOINT` 为命令,`CMD` 为其默认参数,但 CMD 的参数能被 `docker run` 命令末尾传入的参数覆盖。 892 | 893 | ```dockerfile 894 | ENTRYPOINT ["/bin/fortuneloop.sh"] # 在脚本中通过 $1 获取 CMD 第一个参数,与 Go 中 os.Args[1] 类似 895 | CMD ["10", "11"] 896 | ``` 897 | 898 | 二者等同于 Pod 中的 `command` 和 `args`,pod 可在容器定义中通过 command 和 args 字段进行覆盖,注意参数必须是字符串: 899 | 900 | ```yaml 901 | apiVersion: v1 902 | kind: Pod 903 | metadata: 904 | name: fortune2s 905 | spec: 906 | containers: 907 | - image: luksa/fortune:args 908 | args: ["2"] 909 | # ... 910 | ``` 911 | 912 | ### 7.3 为容器设置环境变量 913 | 914 | 只能在各容器级别注入环境变量,而非 Pod 级别。在容器定义的 `spec.containers.env` 中指定即可: 915 | 916 | ```yaml 917 | apiVersion: v1 918 | kind: Pod 919 | metadata: 920 | name: fortune3s 921 | spec: 922 | containers: 923 | - image: luksa/fortune:env 924 | env: 925 | - name: INTERVAL # 对应到容器 html-generator 中的 $INTERVAL 926 | value: "5" 927 | - name: "NESTED_VAR" 928 | value: "$(INTERVAL)_1" # 可引用其他环境变量 929 | name: html-generator 930 | # ... 
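  # 注:$(INTERVAL) 这类引用只对同一 env 列表中先定义的变量生效,引用未定义的变量时会原样保留该字符串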
931 | ``` 932 | 933 | 缺点:硬编码环境变量可能在多个环境下值不同无法复用,需将配置项解耦。 934 | 935 | ### 7.4 ConfigMap 卷 936 | 937 | 存储非敏感信息的文本配置文件。 938 | 939 | ```shell 940 | # 创建 cm 的四种方式:可从 kv 字面量、配置文件、有命名配置文件、目录下所有配置文件 941 | > kubectl create configmap fortune-config --from-literal=sleep-interval=25 # 从 kv 字面量创建 cm 942 | > kubectl create configmap fortune-config --from-file=nginx-conf=my-nginx-config.conf # 指定 k 的文件创建 cm 943 | ``` 944 | 945 | 两种方式将 cm 中的值传递给 Pod 中的容器: 946 | 947 | #### 7.4.1 设置环境变量或命令行参数 948 | 949 | ```yaml 950 | apiVersion: v1 951 | kind: Pod 952 | metadata: 953 | name: fortune-env-from-configmap 954 | spec: 955 | containers: 956 | - image: luksa/fortune:env 957 | name: html-generator 958 | env: 959 | - name: INTERVAL # 取 CM fortune-config 中的 sleep-interval,作为 html-generator 容器环境变量 INTERVAL 的值 960 | valueFrom: 961 | configMapKeyRef: 962 | name: fortune-config-cm 963 | key: sleep-interval 964 | envFrom: # 批量导入 cm 的所有 kv 作为环境变量,并加上前缀 965 | - prefix: CONF_ 966 | configMapRef: 967 | name: fortune-config-cm 968 | # ... 969 | ``` 970 | 971 | 可使用 `kubectl get cm fortune-config -o yaml` 查看 CM 的 data 配置项。 972 | 973 | #### 7.4.2 配置 ConfigMap 卷 974 | 975 | 当配置项过长需放入配置文件时,可将配置文件暴露为 cm 并用卷引用,从而在各容器内部挂载读取。 976 | 977 | ```yaml 978 | apiVersion: v1 979 | kind: Pod 980 | metadata: 981 | name: fortune-configmap-volume 982 | spec: 983 | containers: 984 | - name: html-generator 985 | image: yinzige/fortuneloop:env 986 | env: 987 | - name: INTERVAL 988 | valueFrom: 989 | configMapKeyRef: 990 | key: sleep-interval # raw key file name 991 | name: fortune-config # cm name 992 | volumeMounts: 993 | - mountPath: /var/htdocs 994 | name: html 995 | - name: web-server 996 | image: nginx:alpine 997 | volumeMounts: 998 | - mountPath: /usr/share/nginx/html 999 | name: html 1000 | readOnly: true 1001 | - mountPath: /etc/nginx/conf.d/gzip_in.conf 1002 | name: config 1003 | subPath: gzip.conf # 使用 subPath 只挂载部分卷 gzip.conf 到指定目录下指定文件 gzip_in.conf 1004 | readOnly: true 1005 | ports: 1006 | - containerPort: 80 1007 | name: http 1008 | protocol: TCP 1009 | volumes: 1010 | - name: html 1011 | emptyDir: {} 1012 | - name: config 1013 | configMap: 1014 | name: fortune-config # cm name 1015 | defaultMode: 0666 # 设置卷文件读写权限 1016 | items: # 使用 items 限制从 cm 暴露给卷的文件 1017 | - key: my-nginx-config.conf 1018 | path: gzip.conf # 把 key 文件的值 copy 一份到新文件中 1019 | 1020 | ``` 1021 | 1022 | 添加 `items` 来暴露指定的文件到卷中,`subPath` 用来挂载部分卷,而不隐藏容器目录原有的初始文件。 1023 | 1024 | ```shell 1025 | > kubectl exec fortune-configmap-volume -c web-server -it -- ls -lA /etc/nginx/conf.d 1026 | total 8 1027 | -rw-r--r-- 1 root root 1093 Apr 14 14:46 default.conf # subPath 1028 | -rw-rw-rw- 1 root root 242 Apr 27 16:49 gzip_in.conf 1029 | ``` 1030 | 1031 | #### 7.4.3 ConfigMap 场景 1032 | 1033 | 使用 `kubectl edit cm fortune-config` 修后,容器中对应挂载的卷文件会延迟将修改同步。问题:若 pod 应用不支持配置文件的热更新,那同步了的修改并不会再旧 pod 生效,反而新起的 pod 会生效,造成新旧配置共存的问题。 1034 | 1035 | 场景:cm 的特性是不变性,若 pod 应用本身支持热更新,则可修改 cm 动态更新,但注意有 k8s 的监听延迟。 1036 | 1037 | 1038 | 1039 | ### 7.5 Secret 1040 | 1041 | 存储敏感的配置数据,大小限制 1MB,其配置条目会以 Base64 编码二进制后存储: 1042 | 1043 | ```shell 1044 | > kubectl create secret generic fortune-auth --from-file=fortune-auth/ # password.txt 1045 | ``` 1046 | 1047 | 在 pod 中加载: 1048 | 1049 | ```yaml 1050 | apiVersion: v1 1051 | kind: Pod 1052 | metadata: 1053 | name: fortune-with-serect 1054 | spec: 1055 | containers: 1056 | - name: fortune-auth-main 1057 | image: yinzige/fortuneloop 1058 | volumeMounts: 1059 | - mountPath: /tmp/no_password.txt 1060 | subPath: password.txt 1061 | name: auth 1062 | volumes: 1063 | - name: auth 
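  # auth 卷由 7.5 节开头用 kubectl create secret generic fortune-auth 创建的 Secret 填充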
1064 | secret: 1065 | secretName: fortune-auth 1066 | ``` 1067 | 1068 | 读取正常: 1069 | 1070 | ```shell 1071 | > kubectl exec fortune-with-serect -it -- ls -lh /tmp 1072 | total 4.0K 1073 | -rw-r--r-- 1 root root 10 Apr 27 17:51 no_password.txt 1074 | > kubectl exec fortune-with-serect -it -- mount | grep password 1075 | tmpfs on /tmp/no_password.txt type tmpfs (ro,relatime) # secret 仅存储在内存中 1076 | ``` 1077 | 1078 | secret 可用于从镜像仓库中拉取 private 镜像,需配置专用的 secret 使用: 1079 | 1080 | ```yaml 1081 | apiVersion: v1 1082 | kind: Pod 1083 | spec: 1084 | imagePullSecrets: 1085 | - name: dockerhub-secret 1086 | containers: # ... 1087 | ``` 1088 | 1089 | 1090 | 1091 | ## 8. Pod Metadata 与 k8s API 1092 | 1093 | 场景:从 Pod 中的容器应用进程访问 Pod 元数据及其他资源。 1094 | 1095 | ### 8.1 使用 Downward API 传递 Pod Metadata 1096 | 1097 | 问题:配置数据如环境变量、ConfigMap 都是预设的,应用内无法直接获取如 Pod IP 等动态数据。 1098 | 1099 | 解决:Downward API 通过环境变量、downward API 卷来传递 Pod 的元数据。如:Pod 名称、IP、命名空间、标签、节点名、每个容器的 CPU、内存限制及其使用量。 1100 | 1101 | #### 8.1.1 环境变量透传 1102 | 1103 | 在 Pod 定义中手动将容器需要的元数据,以环境变量的形式手动透传给容器: 1104 | 1105 | ```yaml 1106 | apiVersion: v1 1107 | kind: Pod 1108 | metadata: 1109 | name: downward 1110 | spec: 1111 | containers: 1112 | - name: main 1113 | image: busybox 1114 | command: 1115 | - "sleep" 1116 | - "1000" 1117 | env: 1118 | - name: E_POD_NAMESPACE 1119 | valueFrom: 1120 | fieldRef: 1121 | fieldPath: metadata.namespace 1122 | - name: E_POD_IP 1123 | valueFrom: 1124 | fieldRef: 1125 | fieldPath: status.podIP # 运行时元数据 1126 | - name: E_REQ_CPU 1127 | valueFrom: 1128 | resourceFieldRef: # 引用容器级别的数据,如请求的 CPU、内存用量等需引用 resourceFieldRef 1129 | resource: requests.cpu 1130 | divisor: 1m # 资源单位 1131 | - name: E_LIMIT_MEM 1132 | valueFrom: 1133 | resourceFieldRef: 1134 | resource: limits.memory 1135 | divisor: 1Ki 1136 | ``` 1137 | 1138 | 效果: 1139 | 1140 | ```shell 1141 | k exec downward -it -- env | grep -e "^E_" 1142 | E_POD_NAMESPACE=default 1143 | E_POD_IP=172.17.0.8 1144 | E_REQ_CPU=0 1145 | E_LIMIT_MEM=2085844 1146 | ``` 1147 | 1148 | 缺点:无法通过环境传递 pod 标签和注解等可在运行时动态修改的元数据。 1149 | 1150 | ```shell 1151 | ERROR: error converting fieldPath: field label not supported: metadata.lebels.app 1152 | ``` 1153 | 1154 | 1155 | 1156 | #### 8.1.2 通过 downwardAPI 卷 1157 | 1158 | ```yaml 1159 | apiVersion: v1 1160 | kind: Pod 1161 | metadata: 1162 | name: downward 1163 | labels: 1164 | foo: bar 1165 | annotations: 1166 | k1: v1 1167 | spec: 1168 | containers: 1169 | - name: main 1170 | # ... 
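  # 以下通过名为 downward 的 downwardAPI 卷把元数据以文件形式挂载到容器内 /etc/downward 目录;省略的镜像与 command 字段可沿用前例的 busybox 与 sleep(此处为推测)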
1171 | volumeMounts: 1172 | - name: downward 1173 | mountPath: /etc/downward 1174 | volumes: 1175 | - name: downward # 定义一个名为 downward 的 DownwardAPI 卷,将元数据写入 items 下指定路径的文件中 1176 | downwardAPI: 1177 | items: 1178 | - path: "container_request_memory" 1179 | resourceFieldRef: 1180 | containerName: main # 容器级别的元数据需指定容器名 1181 | resource: requests.cpu 1182 | divisor: 1m 1183 | divisor: 1m 1184 | - path: "labels" # "annotations" # pod 的标签和注解必须使用 downwardAPI 卷去访问 1185 | fieldRef: 1186 | fieldPath: metadata.labels 1187 | ``` 1188 | 1189 | 效果:k8s 会自动地将 pod 的标签和注解同步到 downward API 卷的指定文件中。 1190 | 1191 | ```shell 1192 | > kubectl exec downward-volume-pod -it -- ls /etc/downward 1193 | container_request_memory pod_annotations pod_labels 1194 | 1195 | > kubectl exec downward-volume-pod -it -- cat /etc/downward/pod_annotations 1196 | key1="VALUE1" 1197 | key2="VALUE2\nVALUE20\nVALUE200\n" 1198 | kubernetes.io/config.seen="2020-04-28T04:15:29.722938998Z" 1199 | kubernetes.io/config.source="api" 1200 | 1201 | > kubectl annotate pod downward-volume-pod new_key=NEW_VALUE 1202 | pod/downward-volume-pod annotated 1203 | 1204 | > kubectl exec downward-volume-pod -it -- tail -2 /etc/downward/pod_annotations 1205 | kubernetes.io/config.source="api" 1206 | new_key="NEW_VALUE" 1207 | ``` 1208 | 1209 | 1210 | 1211 | ### 8.2 与 k8s API 交互 1212 | 1213 | 问题:downward API 只能向应用暴露 1 个 Pod 的部分元数据,无法提供其他 Pod 和其他资源信息。 1214 | 1215 | 请求 k8s API: 1216 | 1217 | - 外部:通过 kubectl proxy 中间请求转发代理。 1218 | - 内部:从 Pod 内验证 Secret 卷的 crt 证书、传递 token 到来对本 namespace 内的资源进行操作。 1219 | 1220 | Pod 与 API 服务器交互流程: 1221 | 1222 | - 应用通过 secret 卷下的 `ca.crt` 验证 API 地址 1223 | - 应用带上 TOKEN 授权 1224 | - 操作 pod 所在命名空间内的资源 1225 | 1226 | ```shell 1227 | > ls /var/run/secrets/kubernetes.io/serviceaccount/ 1228 | ca.crt # 验证 API 服务器证书,避免中间人攻击 1229 | namespace # 获取本地 pod 的 namespace: default 1230 | token # 通过 header 添加 "Authorization: Bearer $TOKEN" 方式来获取授权 1231 | 1232 | > curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes # 验证 API 地址 1233 | > export CURL_CA_BUNDLE=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt 1234 | 1235 | > TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) 1236 | > curl -H "Authorization: Bearer $TOKEN" https://kubernetes # 授权 1237 | 1238 | > NS=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) 1239 | > curl -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/namespaces/$NS/pods # 操作本地命名空间资源 1240 | ``` 1241 | 1242 | 折中的 Pod 内部公共透明代理模式:在 pod 内部通过 ambassador 公共容器简化 API 服务器验证流程。普通容器走 HTTP 到 ambassador 容器,后者走 secret 流程走 HTTPS 与 k8s API 交互。 1243 | 1244 | 1245 | 1246 | ## 9. 
Deployment 1247 | 1248 | 场景:按定义的比例滚动升级,出错时升级自动停止,并可回滚。 1249 | 1250 | ### 9.1 纯手动更新运行在 Pod 内的应用程序 1251 | 1252 | 版本升级方式: 1253 | 1254 | - 先删旧 Pod,再自动创建新 Pod:存在不可用间隔。 1255 | 1256 | 修改 RC/RS spec.template 中的镜像 tag 指向新版应用,再手动删除旧版 Pod,RC/RS 会按新模板自动创建新版容器。 1257 | 1258 | - 创建新 Pod,同时逐步剔除旧 Pod:需两个版本应用都能对外服务,短时间内 Pod 数量翻倍。 1259 | 1260 | 修改 Service 的 pod selector,以蓝绿部署的方式进度可控地将流量切换到新 Pod 上。 1261 | 1262 | ### 9.2 基于 RC 的滚动升级 1263 | 1264 | 问题:手动脚本将旧 Pod 缩容,新 Pod 扩容,易出错。 1265 | 1266 | 解决:使用 kubectl 请求 k8s API 来执行滚动升级: 1267 | 1268 | ```shell 1269 | # 指定需更新的 RC kubia-v1,用指定的 image 创建的新 RC 来替换 1270 | > kubectl rolling-update kubia-v1 kubia-v2 --image=wuyinio/kubia:v2 # 从 1.8 已移除 1271 | ``` 1272 | 1273 | 过程:kubectl 为新旧 Pod、新旧 RC 添加 deployment 标签,并向 k8s API 请求对旧 Pod 进行缩容,对新 Pod 扩容,透明地将 service 的标签匹配到新 pod 上。 1274 | 1275 | 原理:由客户端 kubectl 动态地修改两个 RC 的标签,缩容旧 Pod,扩容新 Pod,最终完成流量转移。 1276 | 1277 | 1278 | 1279 | ### 9.3 使用 Deployment 声明式升级 1280 | 1281 | 问题:RC 滚动升级由 kubectl 客户端控制,若网络断开连接则升级中断(如关闭终端),Pod 和 RC 会处于多版本混合态。 1282 | 1283 | 解决:在服务端使用高级资源 Deployment 声明来协调新旧两个 RS 的扩缩容,用户只定义最终目标收敛状态。 1284 | 1285 | deployment 也分为 3 部分:标签选择器、期望副本数、Pod 模板: 1286 | 1287 | ```yaml 1288 | apiVersion: apps/v1 1289 | kind: Deployment 1290 | metadata: 1291 | name: kubia 1292 | spec: 1293 | replicas: 3 1294 | template: 1295 | metadata: 1296 | name: kubia 1297 | labels: 1298 | app: kubia 1299 | spec: 1300 | containers: 1301 | - name: nodejs 1302 | image: yinzige/kubia:v1 1303 | selector: 1304 | matchLabels: 1305 | app: kubia 1306 | ``` 1307 | 1308 | ```shell 1309 | # 创建 deployment 1310 | > kubectl create -f kubia-deployment-v1.yaml --record # record 选项将记录历史版本号,用于后续回滚 1311 | > kubectl rollout status deployment kubia # rollout 显示部署状态 1312 | 1313 | # deployment 用 pod 模板的哈希值创建 RS,再由 RS 创建 Pod 并管理,哈希值一致 1314 | > kubectl get pods 1315 | NAME READY STATUS RESTARTS AGE 1316 | kubia-5b9f8f4d84-nxmqd 1/1 Running 0 2s 1317 | kubia-5b9f8f4d84-q5wc5 1/1 Running 0 2s 1318 | kubia-5b9f8f4d84-r866t 1/1 Running 0 2s 1319 | > kubectl get rs 1320 | NAME DESIRED CURRENT READY AGE 1321 | kubia-5b9f8f4d84 3 3 3 9s 1322 | ``` 1323 | 1324 | deployment 的升级策略:`spec.strategy` 1325 | 1326 | - RollingUpdate:渐进式删除旧 Pod,新旧版短暂混合 1327 | - Recreate:一次性删除所有旧 Pod 再重建新 Pod,中间服务不可用 1328 | 1329 | #### 9.3.1 触发滚动升级 1330 | 1331 | 先指定 Pod 就绪后的等待时间:` kubectl patch deployment kubia -p '{"spec": {"minReadySeconds": 10}}'` 1332 | 1333 | 修改某个容器的镜像来触发升级:`kubectl set image deployment kubia nodejs=yinzige/kubia:v2` 1334 | 1335 | 注:触发升级需真正修改到 deployment 的字段。 1336 | 1337 | 原理:`kubectl get rs` 可看到保留的新旧版 rs,deployment 资源在 k8s master 端会自动控制新旧 RS 的扩缩容。 1338 | 1339 | #### 9.3.2 回滚 1340 | 1341 | ```shell 1342 | > kubectl rollout undo deployment kubia # 回滚到上一次 deployment 部署的版本 1343 | > kubectl rollout history deployment kubia # 创建 deployment 时 --record,此处显示版本 1344 | > kubectl rollout undo deployment kubia --to-revision=1 # 回滚到指定 REVISION,若手动删除了 RS 则无法回滚 1345 | > kubectl rollout pause deployment kubia # 暂停升级,在新 Pod 上进行金丝雀发布验证 1346 | > kubectl rollout resume deployment kubia # 恢复 1347 | ``` 1348 | 1349 | 1350 | 1351 | ### 9.4 结合探针控制升级速度 1352 | 1353 | 可配置 `maxSurge` 和 `maxUnavailable` 来控制升级期间最多可超出期望副本数的 Pod 数量、最多可容忍的不可用 Pod 数量。 1354 | 1355 | ```yaml 1356 | apiVersion: apps/v1 1357 | kind: Deployment 1358 | metadata: 1359 | name: kubia 1360 | spec: 1361 | replicas: 3 1362 | minReadySeconds: 10 # 新 Pod 就绪后需等待 10s,才能继续滚动升级 1363 | strategy: 1364 | type: RollingUpdate 1365 | rollingUpdate: 1366 | maxSurge: 1 # 最多允许超出期望副本数 1 个 Pod,即只能逐个更新 1367 | maxUnavailable: 0 # 不允许有不可用的 Pod,以确保新 Pod 能逐个替换旧的 Pod 1368 | template: 1369 | metadata: 
1370 | name: kubia 1371 | labels: 1372 | app: kubia 1373 | spec: 1374 | containers: 1375 | - name: nodejs 1376 | image: yinzige/kubia:v3 # 接收请求 5s 后返回 500 1377 | readinessProbe: 1378 | periodSeconds: 1 # 定义 HTTP Get 就绪探针每隔 1s 执行一次 1379 | httpGet: 1380 | port: 8080 1381 | path: / 1382 | selector: 1383 | matchLabels: 1384 | app: kubia 1385 | ``` 1386 | 1387 | ```shell 1388 | # apply 对象不存在则创建,否则修改对象属性 1389 | > kubectl apply -f kubia-deployment-v3-with-readinesscheck.yaml 1390 | ``` 1391 | 1392 | 使用上述 deployment 从正常版 v2 升级到 bug 版 v3,据配置 v3 的第一个 Pod 会创建并在第 5s 被 Service 标记为不可用,将其从 endpoint 中移除,请求不会分发到该 v3 Pod,10s 内未就绪,最终部署自动停止。 1393 | 1394 | 如下:kubia-54f54bf655 并未加入到 kubia Service 的 endpoints 中 1395 | 1396 | ```yaml 1397 | > kubectl get endpoints 1398 | NAME ENDPOINTS AGE 1399 | kubia 172.17.0.12:8080,172.17.0.6:8080,172.17.0.8:8080 140m 1400 | 1401 | > kubectl get pod -o wide 1402 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE 1403 | kubia-54f54bf655-tgvt9 0/1 Running 0 63s 172.17.0.7 m01 1404 | kubia-b669c877-8kx8c 1/1 Running 0 2m17s 172.17.0.6 m01 1405 | kubia-b669c877-957fl 1/1 Running 0 113s 172.17.0.12 m01 1406 | kubia-b669c877-n7hnb 1/1 Running 0 2m4s 172.17.0.8 m01 1407 | ``` 1408 | 1409 | 1410 | 1411 | ## 10. Stateful Set 1412 | 1413 | 场景:在有状态分部署存储应用中,Pod 的多副本有各自独立的 PVC 和 PV,pod 在新节点重建后需保证状态一致。 1414 | 1415 | ### 10.2 保证状态一致 1416 | 1417 | - 一致的网络标识 1418 | 1419 | sts 创建的 pod 名字后缀按顺序从 0 递增,通常通过 headless Service 暴露整个集群的 pod,每个 pod 有独立的 DNS 记录,pod 重建后保证名称和主机名一致。 1420 | 1421 | - 一致的存储 1422 | 1423 | sts 有 pod 模板和 PVC 模板,每个 pod 会绑定到唯一的 PVC 和 PV,重建后新 pod 会绑定到旧 PVC 和 PV 复用旧的存储。 1424 | 1425 | ### 10.3 使用 sts 1426 | 1427 | 三种必需资源:PV(若没有默认的 provisioner,则须手动创建)、控制 Service、sts 自身 1428 | 1429 | - 创建 headless 1430 | 1431 | ```yaml 1432 | apiVersion: v1 1433 | kind: Service 1434 | metadata: 1435 | name: kubia 1436 | spec: 1437 | clusterIP: None 1438 | selector: 1439 | app: kubia 1440 | ports: 1441 | - port: 80 1442 | name: http 1443 | ``` 1444 | 1445 | - 创建 sts:PVC 模板会在 pod 运行前创建,并且绑定到默认 storage class 的 provisioner 创建的 PV 上 1446 | 1447 | ```yaml 1448 | apiVersion: apps/v1 1449 | kind: StatefulSet 1450 | metadata: 1451 | name: kubia 1452 | spec: 1453 | serviceName: kubia # 绑定到 kubia 的 headless service 1454 | replicas: 2 1455 | template: 1456 | metadata: 1457 | labels: 1458 | app: kubia 1459 | spec: 1460 | containers: 1461 | - name: kubia 1462 | image: yinzige/kubia-pet 1463 | ports: 1464 | - containerPort: 8080 1465 | name: http 1466 | volumeMounts: 1467 | - mountPath: /var/data # PVC 绑定到 pod 目录 1468 | name: data 1469 | 1470 | volumeClaimTemplates: # 动态 PVC 模板,运行时提前创建 1471 | - metadata: 1472 | name: data 1473 | spec: 1474 | resources: 1475 | requests: 1476 | storage: 1Mi 1477 | accessModes: 1478 | - ReadWriteOnce 1479 | selector: 1480 | matchLabels: 1481 | app: kubia 1482 | ``` 1483 | 1484 | 注:sts 的创建或 scale 扩缩容,都是一次只操作一个 Pod 避免出现竞争、数据不一致的情况,pod 操作顺序与副本数顺序增减一致。 1485 | 1486 | 1487 | 1488 | ## 11. 
K8S 组件 1489 | 1490 | k8s 中各组件通过 API 服务器的 event 事件流的通知机制进行解耦,各组件之间不会直接通信,相互透明。[组件](https://asksendai.com/how-to-detect-kubernetes-vulnerability-cve-2019-11246-using-falco/): 1491 | 1492 | ![](https://mysieve-img.s3.amazonaws.com/pub/1564843777_2019_08_03_0dac5abc-584c-4be0-a63e-5eeaeca41452.png) 1493 | 1494 | - etcd:分布式一致性 KV 存储,只与 API Server 交互,存储集群各种资源元数据。 1495 | - API Server:提供对集群资源的 CURD 接口,推送资源变更的事件流到监听端。 1496 | - Scheduler:监听 Pod 创建事件,筛选出符合条件的节点并选出最优节点,更新 Pod 定义后发布给 API Server 1497 | 1498 | - 各种资源的 Controller:监听资源更新的事件流,检查副本数,同样更新元数据发布给 API Server 1499 | - kubectl:注册 Node 等待分配 Pod,告知 Docker 拉取镜像运行容器,随后向 API Server 会报 Pod 状态及指标 1500 | - kube-proxy:代理 Service 暴露的 IP 和 Port 1501 | 1502 | 1503 | 1504 | ### 11.2 Pod 创建流程 1505 | 1506 | 各控制器间通过 API Server 进行解耦: 1507 | 1508 | ![](https://images.yinzige.com/chain.png) 1509 | 1510 | 1511 | 1512 | ### 11.6 高可用 1513 | 1514 | API Server 和 etcd 可有多节点。 1515 | 1516 | 为避免并发竞争,各控制器同一时间只能有一个实例运行,通过竞争写注解字段的方式进行选举,调度器同理。 1517 | 1518 | 1519 | 1520 | 1521 | 1522 | ## 12. API Server 安全 1523 | 1524 | -------------------------------------------------------------------------------- /ch02/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:12.0-slim 2 | ADD app.js /app.js 3 | ENTRYPOINT ["node", "app.js"] -------------------------------------------------------------------------------- /ch02/app.js: -------------------------------------------------------------------------------- 1 | const http = require('http'); 2 | const os = require('os'); 3 | 4 | console.log("Kubia server starting..."); 5 | 6 | var handler = function (request, response) { 7 | console.log("Received request from " + request.connection.remoteAddress); 8 | response.writeHead(200); 9 | response.end("You've hit " + os.hostname() + "\n"); 10 | }; 11 | 12 | var www = http.createServer(handler); 13 | www.listen(8080); -------------------------------------------------------------------------------- /ch03/kubia-gpu.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubia-gpu 5 | spec: 6 | containers: 7 | - image: yinzige/kubia 8 | name: kubia-container-name 9 | ports: 10 | - containerPort: 8080 11 | protocol: TCP 12 | nodeSelector: 13 | gpu: "true" 14 | # kubernetes.io/hostname: minikube 15 | 16 | -------------------------------------------------------------------------------- /ch03/kubia-manual-with-labels.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubia-manual-v2 5 | labels: 6 | creation_mrthod: manual 7 | env: prod 8 | spec: 9 | containers: 10 | - image: yinzige/kubia 11 | name: kubia-container-name 12 | ports: 13 | - containerPort: 8080 14 | protocol: TCP -------------------------------------------------------------------------------- /ch03/kubia-manual.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubia-manual 5 | spec: 6 | containers: 7 | - image: yinzige/kubia 8 | name: kubia-container-name 9 | ports: 10 | - containerPort: 8080 11 | protocol: TCP -------------------------------------------------------------------------------- /ch04/batch-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: batch-job 5 | spec: 6 | completions: 5 7 | parallelism: 2 8 | 
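# completions: 5 / parallelism: 2:该 Job 共需成功运行 5 个 Pod,同一时刻最多并行 2 个(见 README 4.5 节)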
template: 9 | metadata: 10 | labels: 11 | app: batch-job 12 | spec: 13 | restartPolicy: OnFailure 14 | containers: 15 | - name: main 16 | image: yinzige/batch-job -------------------------------------------------------------------------------- /ch04/batch-job/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox 2 | ENTRYPOINT echo "$(date) Batch job starting"; sleep 30; echo "$(date) Finished succesfully" -------------------------------------------------------------------------------- /ch04/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: cron-batch-job 5 | spec: 6 | schedule: "*/1 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | app: cron-batch-job 13 | spec: 14 | restartPolicy: OnFailure 15 | containers: 16 | - name: cron-batch-job 17 | image: yinzige/batch-job 18 | -------------------------------------------------------------------------------- /ch04/kubia-liveness-probe-initial-delay.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubia-unhealthy-v2 5 | spec: 6 | containers: 7 | - image: yinzige/kubia-unhealthy 8 | name: kubia 9 | livenessProbe: 10 | httpGet: 11 | path: '/' 12 | port: 8080 13 | initialDelaySeconds: 5 14 | periodSeconds: 10 15 | timeoutSeconds: 10 16 | -------------------------------------------------------------------------------- /ch04/kubia-liveness-probe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubia-unhealthy 5 | spec: 6 | containers: 7 | - image: yinzige/kubia-unhealthy 8 | name: kubia 9 | livenessProbe: 10 | httpGet: 11 | path: '/' 12 | port: 8080 -------------------------------------------------------------------------------- /ch04/kubia-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kubia 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: kubia 9 | template: 10 | metadata: 11 | labels: 12 | app: kubia 13 | spec: 14 | containers: 15 | - name: kubia-container 16 | image: yinzige/kubia 17 | ports: 18 | - protocol: TCP 19 | containerPort: 8080 20 | 21 | -------------------------------------------------------------------------------- /ch04/kubia-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: kubia 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: kubia 10 | matchExpressions: 11 | - key: app 12 | operator: In 13 | values: 14 | - kubia 15 | - kubia-v2 16 | - key: app 17 | operator: NotIn 18 | values: 19 | - KUBIA 20 | - key: env 21 | operator: Exists 22 | - key: ENV 23 | operator: DoesNotExist 24 | template: 25 | metadata: 26 | labels: 27 | app: kubia 28 | env: env_exists 29 | spec: 30 | containers: 31 | - name: kubia 32 | image: yinzige/kubia 33 | ports: 34 | - protocol: TCP 35 | containerPort: 8080 36 | -------------------------------------------------------------------------------- /ch04/kubia/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:12.0-slim 2 | ADD app.js /app.js 3 | ENTRYPOINT ["node", "app.js"] 
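# 说明:同目录 app.js 从第 6 个请求起返回 500,推测即 README 4.1 存活探针示例所用 kubia-unhealthy 镜像的构建源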
-------------------------------------------------------------------------------- /ch04/kubia/app.js: -------------------------------------------------------------------------------- 1 | const http = require('http'); 2 | const os = require('os'); 3 | 4 | console.log("Kubia server starting..."); 5 | 6 | var requestCount = 0; 7 | 8 | var handler = function(request, response) { 9 | console.log("Received request from " + request.connection.remoteAddress); 10 | requestCount++; 11 | if (requestCount > 5) { 12 | response.writeHead(500); 13 | response.end("I'm not well. Please restart me!"); 14 | return; 15 | } 16 | response.writeHead(200); 17 | response.end("You've hit " + os.hostname() + "\n"); 18 | }; 19 | 20 | var www = http.createServer(handler); 21 | www.listen(8080); -------------------------------------------------------------------------------- /ch04/ssd-monitor-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: ssd-monitor 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: ssd-monitor 9 | template: 10 | metadata: 11 | labels: 12 | app: ssd-monitor 13 | spec: 14 | nodeSelector: 15 | disk: ssd 16 | containers: 17 | - name: main 18 | image: yinzige/ssd-monitor 19 | -------------------------------------------------------------------------------- /ch04/ssd-monitor/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox 2 | ENTRYPOINT while true; do echo 'SSD OK'; sleep 5; done -------------------------------------------------------------------------------- /ch05/external-service-endpoints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: baidu-endpoints 5 | subsets: 6 | - addresses: 7 | - ip: 220.181.38.148 # baidu.com 8 | - ip: 39.156.69.79 9 | ports: 10 | - port: 80 11 | -------------------------------------------------------------------------------- /ch05/external-service-externalname.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: baidu-endpoints 5 | spec: 6 | type: ExternalName 7 | externalName: www.baidu.com 8 | ports: 9 | - port: 80 10 | -------------------------------------------------------------------------------- /ch05/external-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: baidu-endpoints 5 | spec: 6 | ports: 7 | - port: 80 -------------------------------------------------------------------------------- /ch05/kubia-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: kubia 5 | spec: 6 | rules: 7 | - host: "kubia.example.com" 8 | http: 9 | paths: 10 | - path: /kubia 11 | backend: 12 | serviceName: kubia-nodeport 13 | servicePort: 80 14 | - path: /user 15 | backend: 16 | serviceName: user-svc 17 | servicePort: 90 18 | - host: "google.com" 19 | http: 20 | paths: 21 | - path: / 22 | backend: 23 | serviceName: gooele 24 | servicePort: 8080 -------------------------------------------------------------------------------- /ch05/kubia-rc-readinessprobe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | 
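# 带就绪探针的 RC:仅当容器内存在 /var/ready_now 文件(exec ls 探测成功)时 Pod 才被视为就绪并加入 Service 的 endpoints,见 README 5.5 节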
metadata: 4 | name: kubia 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: kubia 9 | template: 10 | metadata: 11 | labels: 12 | app: kubia 13 | spec: 14 | containers: 15 | - name: kubia-container 16 | image: yinzige/kubia 17 | readinessProbe: 18 | exec: 19 | command: 20 | - ls 21 | - /var/ready_now 22 | ports: 23 | - name: http 24 | containerPort: 8080 25 | - name: https 26 | containerPort: 8443 27 | 28 | -------------------------------------------------------------------------------- /ch05/kubia-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kubia 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: kubia 9 | template: 10 | metadata: 11 | labels: 12 | app: kubia 13 | spec: 14 | containers: 15 | - name: kubia-container 16 | image: yinzige/kubia 17 | ports: 18 | - name: http 19 | containerPort: 8080 20 | - name: https 21 | containerPort: 8443 22 | 23 | -------------------------------------------------------------------------------- /ch05/kubia-svc-headless.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubia-headless 5 | spec: 6 | clusterIP: None 7 | ports: 8 | - port: 80 9 | targetPort: 8080 10 | selector: 11 | app: kubia -------------------------------------------------------------------------------- /ch05/kubia-svc-loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubia-loadbalancer 5 | spec: 6 | type: LoadBalancer 7 | ports: 8 | - port: 80 9 | targetPort: 8080 10 | selector: 11 | app: kubia -------------------------------------------------------------------------------- /ch05/kubia-svc-namenode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubia-nodeport 5 | spec: 6 | type: NodePort 7 | ports: 8 | - port: 80 9 | targetPort: 8080 10 | nodePort: 30001 11 | selector: 12 | app: kubia -------------------------------------------------------------------------------- /ch05/kubia-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubia 5 | spec: 6 | sessionAffinity: ClientIP 7 | ports: 8 | - name: http 9 | port: 80 10 | targetPort: http 11 | - name: https 12 | port: 443 13 | targetPort: https # defined in pod template.spec.containers.ports array 14 | selector: 15 | app: kubia -------------------------------------------------------------------------------- /ch06/fortune-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fortune 5 | spec: 6 | containers: 7 | - name: html-generator 8 | image: yinzige/fortuneloop 9 | volumeMounts: 10 | - name: html 11 | mountPath: /var/htdocs # 将 html 的卷挂载到 html-generator 容器的 /var/htdocs 目录 12 | - name: web-server 13 | image: nginx:alpine 14 | volumeMounts: 15 | - name: html 16 | mountPath: /usr/share/nginx/html # 将 html 的卷挂载 web-server 容器到 /usr/share/nginx/html 目录 17 | readOnly: true 18 | volumes: 19 | - name: html # 声明名为 emptyDir 的 emptyDir 卷 20 | emptyDir: {} -------------------------------------------------------------------------------- /ch06/fortune/Dockerfile: -------------------------------------------------------------------------------- 1 | 
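# fortuneloop 镜像:fortuneloop.sh 每 10 秒将 fortune 输出写入 /var/htdocs/index.html,配合 README 6.2 节的 emptyDir 卷示例由 nginx 容器对外提供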
FROM ubuntu:latest 2 | 3 | RUN apt-get update ; apt-get -y install fortune 4 | ADD fortuneloop.sh /bin/fortuneloop.sh 5 | 6 | RUN chmod a+x /bin/fortuneloop.sh 7 | 8 | ENTRYPOINT /bin/fortuneloop.sh -------------------------------------------------------------------------------- /ch06/fortune/fortuneloop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | trap "exit" SIGINT 3 | mkdir /var/htdocs 4 | 5 | while : 6 | do 7 | echo $(date) Writing fortune to /var/htdocs/index.html 8 | /usr/games/fortune > /var/htdocs/index.html 9 | sleep 10 10 | done -------------------------------------------------------------------------------- /ch06/mongodb-pod-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mongodb 5 | spec: 6 | containers: 7 | - name: mongodb 8 | image: mongodb 9 | ports: 10 | - containerPort: 27017 11 | protocol: TCP 12 | volumeMounts: 13 | - mountPath: /data/db 14 | name: mongodb-data 15 | volumes: 16 | - name: mongodb-data 17 | persistentVolumeClaim: 18 | claimName: mongodb-pvc -------------------------------------------------------------------------------- /ch06/mongodb-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mongodb-pv 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | - ReadOnlyMany 11 | persistentVolumeReclaimPolicy: Retain 12 | hostPath: 13 | path: /tmp/mongodb-data -------------------------------------------------------------------------------- /ch06/mongodb-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mongodb-pvc 5 | spec: 6 | resources: 7 | requests: 8 | storage: 1Gi 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: "" -------------------------------------------------------------------------------- /ch06/storageclass-fast-hostpath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: fast 5 | provisioner: k8s.io/minikube-hostpath 6 | parameters: 7 | type: pd-ssd -------------------------------------------------------------------------------- /ch07/config-files/my-nginx-config.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name www.kubia-example.com; 4 | 5 | gzip on; 6 | gzip_types text/plain application/xml; 7 | 8 | location / { 9 | root /usr/share/nginx/html; 10 | index index.html index.htm; 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /ch07/config-files/sleep-interval: -------------------------------------------------------------------------------- 1 | 10 2 | -------------------------------------------------------------------------------- /ch07/fortune-args/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | 3 | RUN apt-get update ; apt-get -y install fortune 4 | ADD fortuneloop.sh /bin/fortuneloop.sh 5 | 6 | RUN chmod a+x /bin/fortuneloop.sh 7 | 8 | ENTRYPOINT ["/bin/fortuneloop.sh"] 9 | 10 | CMD ["10"] -------------------------------------------------------------------------------- /ch07/fortune-args/fortuneloop.sh: 

--------------------------------------------------------------------------------
/ch07/fortune-args/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:latest

RUN apt-get update ; apt-get -y install fortune
ADD fortuneloop.sh /bin/fortuneloop.sh

RUN chmod a+x /bin/fortuneloop.sh

ENTRYPOINT ["/bin/fortuneloop.sh"]

CMD ["10"]               # default argument: regenerate the fortune every 10 seconds

--------------------------------------------------------------------------------
/ch07/fortune-args/fortuneloop.sh:
--------------------------------------------------------------------------------
#!/bin/bash
trap "exit" SIGINT

INTERVAL=$1
echo Configured to generate new fortune every $INTERVAL seconds

mkdir -p /var/htdocs

while :; do
  echo $(date) Writing fortune to /var/htdocs/index.html
  /usr/games/fortune >/var/htdocs/index.html
  sleep $INTERVAL
done

--------------------------------------------------------------------------------
/ch07/fortune-auth/password.txt:
--------------------------------------------------------------------------------
IT'S EMPTY

--------------------------------------------------------------------------------
/ch07/fortune-env/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:latest

RUN apt-get update ; apt-get -y install fortune
ADD fortuneloop.sh /bin/fortuneloop.sh

RUN chmod a+x /bin/fortuneloop.sh

ENTRYPOINT ["/bin/fortuneloop.sh"]

CMD ["10"]

--------------------------------------------------------------------------------
/ch07/fortune-env/fortuneloop.sh:
--------------------------------------------------------------------------------
#!/bin/bash
trap "exit" SIGINT

echo Configured to generate new fortune every $INTERVAL seconds

mkdir -p /var/htdocs

while :; do
  echo $(date) Writing fortune to /var/htdocs/index.html
  /usr/games/fortune >/var/htdocs/index.html
  sleep $INTERVAL
done

--------------------------------------------------------------------------------
/ch07/fortune-pod-configmap-volume.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: fortune-configmap-volume
spec:
  containers:
  - name: html-generator
    image: yinzige/fortuneloop:env
    env:
    - name: INTERVAL
      valueFrom:
        configMapKeyRef:
          key: sleep-interval
          name: fortune-config          # ConfigMap name
    volumeMounts:
    - mountPath: /var/htdocs
      name: html
  - name: web-server
    image: nginx:alpine
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: html
      readOnly: true
    - mountPath: /etc/nginx/conf.d/gzip_in.conf   # mount a single file instead of the whole directory
      name: config
      subPath: gzip.conf
      readOnly: true
    ports:
    - containerPort: 80
      name: http
      protocol: TCP
  volumes:
  - name: html
    emptyDir: {}
  - name: config
    configMap:
      name: fortune-config              # ConfigMap name
      defaultMode: 0666
      items:
      - key: my-nginx-config.conf
        path: gzip.conf                 # expose this key under the file name gzip.conf

--------------------------------------------------------------------------------
/ch07/fortune-pod-env-configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: fortune-env-from-configmap
spec:
  containers:
  - name: fortune-env
    image: yinzige/fortuneloop:env
    env:
    - name: INTERVAL2
      valueFrom:
        configMapKeyRef:
          name: fortune-config-cm
          key: INTERVAL
    envFrom:                # import every entry of the ConfigMap as an env var, prefixed with CONF_
    - prefix: CONF_
      configMapRef:
        name: fortune-config-cm

--------------------------------------------------------------------------------
/ch07/fortune-secret-pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: fortune-with-secret
spec:
  containers:
  - name: fortune-auth-main
    image: yinzige/fortuneloop
    volumeMounts:
    - mountPath: /tmp/no_password.txt   # mount only the password.txt entry of the Secret as a single file
      subPath: password.txt
      name: auth
  volumes:
  - name: auth
    secret:
      secretName: fortune-auth
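
The `fortune-auth` Secret mounted above is also not defined by a manifest. A sketch of creating it from `password.txt` and checking the mounted copy (assumes the commands are run from `ch07/`):

```shell
> kubectl create secret generic fortune-auth --from-file=fortune-auth/password.txt   # key: password.txt
> kubectl create -f fortune-secret-pod.yaml
> kubectl exec fortune-with-secret -- cat /tmp/no_password.txt                        # prints IT'S EMPTY
```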

--------------------------------------------------------------------------------
/ch07/fortuneloop-args-pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: fortune5s
spec:
  containers:
  - name: html-generator
    image: yinzige/fortuneloop:args
    args:                    # overrides the image's CMD: regenerate every 5 seconds
    - "5"
    volumeMounts:
    - mountPath: /var/htdocs
      name: html
  - name: web-server
    image: nginx:alpine
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: html
  volumes:
  - name: html
    emptyDir: {}

--------------------------------------------------------------------------------
/ch07/fortuneloop-env-pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: fortune6s
spec:
  containers:
  - name: html-generator
    image: yinzige/fortuneloop:env
    env:
    - name: "INTERVAL"
      value: "6"
    - name: "NESTED_VAR"
      value: "$(INTERVAL)_1"   # references the INTERVAL variable defined above
    volumeMounts:
    - mountPath: /var/htdocs
      name: html
  - name: web-server
    image: nginx:alpine
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: html
  volumes:
  - name: html
    emptyDir: {}

--------------------------------------------------------------------------------
/ch07/interval_val.txt:
--------------------------------------------------------------------------------
interval=9

--------------------------------------------------------------------------------
/ch07/pod-with-private-image.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: private-it
spec:
  imagePullSecrets:            # registry credentials used to pull the private image
  - name: dockerhub-secret
  containers:
  - name: main
    image: yinzige/busybox:secret
    command:
    - ls
    args:
    - /
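
The `dockerhub-secret` referenced by `imagePullSecrets` has to exist before the pod can pull the private image. A sketch with placeholder credentials (`<user>`, `<password>`, `<email>` are stand-ins):

```shell
> kubectl create secret docker-registry dockerhub-secret \
    --docker-username=<user> --docker-password=<password> --docker-email=<email>
> kubectl create -f pod-with-private-image.yaml
> kubectl logs private-it      # the container only runs `ls /` and then exits
```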

--------------------------------------------------------------------------------
/ch08/curl.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: curl
spec:
  containers:
  - name: main
    image: tutum/curl
    command: ["sleep", "10000"]   # keep the pod alive so curl can be run interactively via kubectl exec

--------------------------------------------------------------------------------
/ch08/downward-api-pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: downward
spec:
  containers:
  - name: main
    image: busybox
    command:
    - "sleep"
    - "1000"
    env:
    - name: E_POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: E_POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP            # runtime metadata, known only after the pod is scheduled
    - name: E_REQ_CPU
      valueFrom:
        resourceFieldRef:                    # container-level data such as CPU/memory requests and limits must use resourceFieldRef
          resource: requests.cpu
          divisor: 1m                        # unit for the exposed value (millicores here)
    - name: E_LIMIT_MEM
      valueFrom:
        resourceFieldRef:
          resource: limits.memory
          divisor: 1Ki

--------------------------------------------------------------------------------
/ch08/downward-api-volume.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: downward-volume-pod
  labels:
    is_ready: "ok"
  annotations:
    key1: VALUE1
    key2: |
      VALUE2
      VALUE20
      VALUE200
spec:
  containers:
  - name: main
    image: busybox
    command:
    - "sleep"
    - "1000"
    volumeMounts:
    - mountPath: /etc/downward
      name: downward
  volumes:
  - name: downward
    downwardAPI:
      items:
      - path: "pod_labels"                   # exposed as the file /etc/downward/pod_labels
        fieldRef:
          fieldPath: metadata.labels
      - path: "pod_annotations"
        fieldRef:
          fieldPath: metadata.annotations
      - path: "container_request_cpu"        # resource fields exposed through a volume must name the container
        resourceFieldRef:
          containerName: main
          resource: requests.cpu
          divisor: 1m

--------------------------------------------------------------------------------
/ch09/kubia-deployment-v3-with-readinescheck.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  minReadySeconds: 10             # a new Pod must stay ready for 10s before the rollout moves on
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1                 # at most one Pod above the desired count, i.e. replace Pods one at a time
      maxUnavailable: 0           # no Pod may become unavailable, so an old Pod is removed only after its replacement is ready
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - name: nodejs
        image: yinzige/kubia:v3   # starts returning 500 from the fifth request onward
        readinessProbe:
          periodSeconds: 1        # run the HTTP GET readiness probe every second
          httpGet:
            port: 8080
            path: /
  selector:
    matchLabels:
      app: kubia

--------------------------------------------------------------------------------
/ch09/kubia-deployment.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - name: nodejs
        image: yinzige/kubia:v1
  selector:
    matchLabels:
      app: kubia
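
A sketch of a typical rollout cycle with the Deployment above (image tags taken from the ch09 manifests; assumes the commands are run from `ch09/`):

```shell
> kubectl create -f kubia-deployment.yaml --record             # --record keeps the command in the rollout history
> kubectl set image deployment kubia nodejs=yinzige/kubia:v2   # trigger a rolling update to v2
> kubectl rollout status deployment kubia                      # watch the rollout progress
> kubectl rollout history deployment kubia                     # list revisions
> kubectl rollout undo deployment kubia --to-revision=1        # roll back to the first revision
```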

--------------------------------------------------------------------------------
/ch09/kubia-rc-and-service-v1.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ReplicationController
metadata:
  name: kubia-v1
spec:
  replicas: 3
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - name: nodejs
        image: yinzige/kubia:v1

---

apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  type: LoadBalancer
  selector:
    app: kubia
  ports:
  - port: 80
    targetPort: 8080

--------------------------------------------------------------------------------
/ch09/v1/Dockerfile:
--------------------------------------------------------------------------------
FROM node:12.0-slim
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

--------------------------------------------------------------------------------
/ch09/v1/app.js:
--------------------------------------------------------------------------------
const http = require('http');
const os = require('os');

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  response.writeHead(200);
  response.end("This is v1 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);

--------------------------------------------------------------------------------
/ch09/v2/Dockerfile:
--------------------------------------------------------------------------------
FROM node:12.0-slim
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

--------------------------------------------------------------------------------
/ch09/v2/app.js:
--------------------------------------------------------------------------------
const http = require('http');
const os = require('os');

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  response.writeHead(200);
  response.end("This is VERSION_v2 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);

--------------------------------------------------------------------------------
/ch09/v3/Dockerfile:
--------------------------------------------------------------------------------
FROM node:12.0-slim
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

--------------------------------------------------------------------------------
/ch09/v3/app.js:
--------------------------------------------------------------------------------
const http = require('http');
const os = require('os');

var requestCount = 0;

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  if (++requestCount >= 5) {
    response.writeHead(500);
    response.end("Some internal error has occurred! This is pod " + os.hostname() + "\n");
    return;
  }
  response.writeHead(200);
  response.end("This is v3 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);

--------------------------------------------------------------------------------
/ch09/v4/Dockerfile:
--------------------------------------------------------------------------------
FROM node:12.0-slim
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

--------------------------------------------------------------------------------
/ch09/v4/app.js:
--------------------------------------------------------------------------------
const http = require('http');
const os = require('os');

console.log("Kubia server starting...");

var handler = function(request, response) {
  console.log("Received request from " + request.connection.remoteAddress);
  response.writeHead(200);
  response.end("This is v4 running in pod " + os.hostname() + "\n");
};

var www = http.createServer(handler);
www.listen(8080);
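
Because the v3 app starts failing from its fifth request, the readiness probe in kubia-deployment-v3-with-readinescheck.yaml marks its Pods as not ready and, together with `maxUnavailable: 0`, blocks the rollout. A sketch of observing and reverting that (assumes the v1/v2 Deployment above is already running):

```shell
> kubectl apply -f kubia-deployment-v3-with-readinescheck.yaml   # roll out the broken v3 image with the readiness check
> kubectl get pods                                               # the new Pod soon shows READY 0/1 and stays that way
> kubectl rollout status deployment kubia                        # the rollout never completes
> kubectl rollout undo deployment kubia                          # abort and return to the previous revision
```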

--------------------------------------------------------------------------------
/ch10/kubia-headless-service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  clusterIP: None          # headless: DNS resolves the service name to the individual Pod IPs
  selector:
    app: kubia
  ports:
  - port: 80
    name: http

--------------------------------------------------------------------------------
/ch10/kubia-pet-image/Dockerfile:
--------------------------------------------------------------------------------
FROM node:12.0-slim
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]

--------------------------------------------------------------------------------
/ch10/kubia-pet-image/app.js:
--------------------------------------------------------------------------------
const http = require('http');
const os = require('os');
const fs = require('fs');

const dataFile = "/var/data/kubia.txt";

function fileExists(file) {
  try {
    fs.statSync(file);
    return true;
  } catch (e) {
    return false;
  }
}

var handler = function(request, response) {
  if (request.method == 'POST') {
    var file = fs.createWriteStream(dataFile);
    file.on('open', function (fd) {
      request.pipe(file);
      console.log("New data has been received and stored.");
      response.writeHead(200);
      response.end("Data stored on pod " + os.hostname() + "\n");
    });
  } else {
    var data = fileExists(dataFile) ? fs.readFileSync(dataFile, 'utf8') : "No data posted yet";
    response.writeHead(200);
    response.write("You've hit " + os.hostname() + "\n");
    response.end("Data stored on this pod: " + data + "\n");
  }
};

var www = http.createServer(handler);
www.listen(8080);

--------------------------------------------------------------------------------
/ch10/kubia-statefulset.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kubia
spec:
  serviceName: kubia               # governed by the kubia headless service above
  replicas: 2
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: kubia
        image: yinzige/kubia-pet
        ports:
        - containerPort: 8080
          name: http
        volumeMounts:
        - mountPath: /var/data
          name: data

  volumeClaimTemplates:            # PVC template; a claim is created ahead of each replica
  - metadata:
      name: data
    spec:
      resources:
        requests:
          storage: 1Mi
      accessModes:
      - ReadWriteOnce
  selector:
    matchLabels:
      app: kubia
--------------------------------------------------------------------------------
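
A sketch of bringing the StatefulSet up and checking that each replica keeps its own identity and volume (assumes the commands are run from `ch10/`; pod names follow the StatefulSet ordinal convention):

```shell
> kubectl create -f kubia-headless-service.yaml
> kubectl create -f kubia-statefulset.yaml
> kubectl get pods -l app=kubia                       # kubia-0 is created first, then kubia-1
> kubectl get pvc                                     # data-kubia-0 and data-kubia-1, one claim per replica
> kubectl port-forward kubia-0 8080:8080              # talk to one specific replica
> curl -X POST -d "hey kubia-0" localhost:8080        # stored only in kubia-0's volume
> curl localhost:8080
> kubectl scale statefulset kubia --replicas=3        # scaling up adds kubia-2 with its own PVC
```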