├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── SUMMARY.md ├── book.json ├── configmapdemo ├── cm-demo1.yaml ├── cm-demo2.yaml ├── cm-demo3.yaml ├── cm-demo4.yaml └── cm-demo5.yaml ├── daemonsetdemo └── daemonset-demo.yaml ├── dashboard ├── admin-acount.yaml ├── dashboard.yaml ├── mac.token └── token ├── deploymentdemo ├── deploy-demo.yaml └── deploy-svc.yaml ├── dnsdemo ├── test-api.yaml └── test-nginx.yaml ├── docs ├── 1.课程介绍.md ├── 10.Docker Swarm.md ├── 11.图形化管理和监控.md ├── 12.Docker的多阶段构建.md ├── 13.Dockerfile最佳实践.md ├── 14.Kubernetes初体验.md ├── 15.基本概念与组件.md ├── 16.用 kubeadm 搭建集群环境.md ├── 17.安装 Dashboard 插件.md ├── 18.YAML 文件.md ├── 19.静态 Pod.md ├── 2.Docker 简介.md ├── 20.Pod Hook.md ├── 21.健康检查.md ├── 22.初始化容器.md ├── 23.使用RC管理Pod.md ├── 24.Deployment 使用.md ├── 25.Pod 水平自动伸缩.md ├── 26.Job与Cronjob 的使用.md ├── 27.Service.md ├── 28.ConfigMap.md ├── 29.Secret.md ├── 3.镜像和容器的基本操作.md ├── 30.RBAC.md ├── 31.部署 Wordpress 示例.md ├── 32.DaemonSet 与 StatefulSet.md ├── 33.PV.md ├── 34.PVC.md ├── 35.StorageClass.md ├── 36.Jenkins Slave.md ├── 37.Jenkins Pipeline.md ├── 38.Jenkins BlueOcean.md ├── 39.kubedns.md ├── 4.Dockerfile 定制镜像.md ├── 40.ingress.md ├── 41.ingress config.md ├── 42.Helm安装.md ├── 43.Helm基本使用.md ├── 44.Helm模板之内置函数和Values.md ├── 45.Helm模板之模板函数与管道.md ├── 46.Helm模板之控制流程.md ├── 47.Helm模板之命名模板.md ├── 48.Helm模板之其他注意事项.md ├── 49.Helm Hooks.md ├── 5.私有镜像仓库.md ├── 50.Kubernetes调度策略.md ├── 51.Kubernetes亲和性调度.md ├── 52.Prometheus基本使用.md ├── 53.监控Kubernetes集群应用.md ├── 54.监控Kubernetes集群节点.md ├── 55.监控Kubernetes常用资源对象.md ├── 56.Grafana的安装使用.md ├── 57.AlertManager的使用.md ├── 58.Prometheus Operator.md ├── 59.自定义Prometheus Operator 监控项.md ├── 6.数据共享与持久化.md ├── 60.Prometheus Operator高级配置.md ├── 61.日志收集架构.md ├── 62.搭建 EFK 日志系统.md ├── 63.Harbor.md ├── 64.Gitlab.md ├── 65.Gitlab CI.md ├── 66.devops.md ├── 67.Upgrade集群.md ├── 7.Docker的网络模式.md ├── 8.Docker Compose.md ├── 9.Docker Machine.md ├── Helm 部署应用示例-todo.md ├── files │ ├── grafana-k8s-cluster-dashboard.json │ └── pod.json ├── images │ ├── access-modes.png │ ├── alertmanager-dingtalk-message.png │ ├── blue-demo1.png │ ├── blue-demo2.png │ ├── blue-demo3.png │ ├── blue-demo4.png │ ├── blue-demo5.png │ ├── blue-demo6.png │ ├── blue-demo7.png │ ├── course-logo.png │ ├── dashboard-login.png │ ├── dashboard.png │ ├── deployment.png │ ├── dns.png │ ├── docker-cadvisor.png │ ├── docker-engine.png │ ├── docker-netework-bridge.jpeg │ ├── docker-network-container.jpeg │ ├── docker-network-host.jpeg │ ├── docker-network-none.jpeg │ ├── docker-structrue.png │ ├── docker-swarm-structrue.png │ ├── docker-swarm-task-service.png │ ├── docker-ui-portainer.png │ ├── docker-ui-rancher.png │ ├── docker-what.png │ ├── grafana-add-dingtalk-robot.png │ ├── grafana-alert-dingtalk-robot.png │ ├── grafana-alert-email.png │ ├── grafana-cluster-table.png │ ├── grafana-cpu-usage.png │ ├── grafana-dashboard-add.png │ ├── grafana-dashboard-edit.png │ ├── grafana-dashboard-edit2.png │ ├── grafana-dashboard-import.png │ ├── grafana-dashboard-import2.png │ ├── grafana-dingtalk-alert2.png │ ├── grafana-email-alert.png │ ├── grafana-email-alert2.png │ ├── grafana-graph-alert.png │ ├── grafana-graph-notify.png │ ├── grafana-index.png │ ├── grafana-k8s-cluster-dashboard.png │ ├── grafana-k8s-monitor.png │ ├── grafana-k8s-plugin-cluster.png │ ├── grafana-k8s-plugin-config.png │ ├── grafana-k8s-plugin.png │ ├── grafana-k8s-plugin2.png │ ├── grafana-login.png │ ├── grafana-prometheus-ds.png │ ├── grafana-table.png │ ├── helm-demo1.png │ ├── helm-structrure.png │ ├── 
horizontal-pod-autoscaler.svg │ ├── ingress-config1.png │ ├── ingress-config2.png │ ├── ingress-config3.png │ ├── ingress-config4.png │ ├── jenkins-demo1-config.jpeg │ ├── jenkins-demo1-config2.jpeg │ ├── jenkins-demo1-config3.jpeg │ ├── jenkins-demo1-config4.jpeg │ ├── jenkins-demo1-config5.jpeg │ ├── jenkins-demo1.png │ ├── jenkins-k8s-config1.jpg │ ├── jenkins-k8s-config2.png │ ├── jenkins-k8s-config3.png │ ├── jenkins-k8s-config4.png │ ├── k8s-basic.png │ ├── k8s-cluster.png │ ├── k8s-jenkins-slave.png │ ├── k8s-katacoda.png │ ├── k8s-pod-process.png │ ├── k8s-pod.png │ ├── k8s-qrcode.png │ ├── k8s-rancher.png │ ├── k8s-service.png │ ├── k8s-structure.jpeg │ ├── kube-scheduler-detail.png │ ├── kube-scheduler-filter.jpg │ ├── kube-scheduler-structrue.jpg │ ├── kubeadm-dashboard.png │ ├── kubedns.jpg │ ├── kubernetes_monitoring_heapster.png │ ├── loap.jpg │ ├── nginx-consul.png │ ├── ngx200.png │ ├── ngx403.png │ ├── pipeline-demo1.png │ ├── pipeline-demo2.png │ ├── pipeline-demo3.png │ ├── pipeline-demo4.png │ ├── pipeline-demo5.png │ ├── pipeline-demo6.png │ ├── pipeline-demo7.png │ ├── prometheus-alert-email.png │ ├── prometheus-alertmanager-silence.png │ ├── prometheus-alertmanager-webui.png │ ├── prometheus-alerts.png │ ├── prometheus-alerts2.png │ ├── prometheus-apiserver-request.png │ ├── prometheus-apiserver.png │ ├── prometheus-apiserver2.png │ ├── prometheus-architecture.png │ ├── prometheus-cadvisor-graph.png │ ├── prometheus-cadvisor.png │ ├── prometheus-dashboard-targets.png │ ├── prometheus-menu.png │ ├── prometheus-metrics-graph.png │ ├── prometheus-metrics-menu.png │ ├── prometheus-nodes-graph1.png │ ├── prometheus-nodes-graph2.png │ ├── prometheus-nodes-target.png │ ├── prometheus-nodes-target2.png │ ├── prometheus-operator.png │ ├── prometheus-service-endpoints.png │ ├── prometheus-service-endpoints2.png │ ├── prometheus-targets-redis.png │ ├── prometheus-webui.png │ ├── promethues-alertmanager-email2.png │ ├── promethues-nodes-target2.png │ ├── promethues-operator-grafana.png │ ├── promethues-operator-kube-scheduler-error.png │ ├── promethues-operator-kube-scheduler.png │ ├── promethues-operator-targets.png │ ├── promtheus-before-label.png │ ├── redis-graph.png │ ├── redis-metrics.png │ ├── services-iptables-overview.svg │ ├── setup-jenkins-01-unlock.jpg │ ├── setup-jenkins-02-plugin.png │ ├── setup-jenkins-home.png │ ├── setup-jenkins-k8s-plugin.png │ ├── traefik-tls-demo1.png │ ├── traefik-tls-demo2.png │ ├── traefik-tls-demo3.png │ ├── traefik-tls.png │ ├── wordpress-home.jpg │ ├── wordpress-ui.jpg │ ├── wordpress.jpg │ ├── ydzs-qrcode.png │ └── ydzs-xcx.png ├── index.md └── test.md ├── efkdemo ├── elasticsearch-statefulset.yaml ├── elasticsearch-storageclass.yaml ├── elasticsearch-svc.yaml ├── fluentd-configmap.yaml ├── fluentd-daemonset.yaml ├── kibana.yaml └── kube-logging.yaml ├── elastic-single ├── elastic.yaml └── kibana.yaml ├── grafana ├── grafana-chown-job.yaml ├── grafana-cm.yaml ├── grafana-deploy.yaml ├── grafana-svc.yaml ├── grafana-volume.yaml └── grafana.ini ├── hook ├── poststart-hook.yaml └── prestop-hook.yaml ├── hpademo └── hpa-demo.yaml ├── initcontainer ├── initconfig.yaml ├── initpod1.yaml └── initservice.yaml ├── jenkins ├── jenkins-slave.Dockerfile ├── jenkins.yaml ├── rbac.yaml ├── svc.yaml └── volume.yaml ├── jobdemo ├── cronjob-demo.yaml └── job-demo.yaml ├── kubedns.yaml ├── kubedns └── kubedns.yaml ├── livenessprobe ├── liveness-exec.yaml ├── liveness-http.yaml └── liveness-readness.yaml ├── logdemo └── 
two-files-counter-pod-streaming-sidecar.yaml ├── mychart ├── Chart.yaml ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ └── configmap.yaml └── values.yaml ├── mynginx ├── .dockerignore ├── Dockerfile ├── add.json ├── index.html └── test.json ├── myproject ├── context │ ├── .dockerignore │ ├── hello │ ├── test │ │ └── test.txt │ └── world.txt └── dockerfiles │ └── Dockerfile ├── nodedemo ├── Dockerfile ├── index.js ├── mynode │ └── Dockerfile └── package.json ├── nodedemo2 ├── Dockerfile ├── index.js └── package.json ├── package-lock.json ├── prome ├── dingtalk-hook.yaml ├── prome-cm.yaml ├── prome-deploy.yaml ├── prome-node-exporter.yaml ├── prome-rbac.yaml ├── prome-redis-exporter.yaml ├── prome-svc.yaml └── prome-volume.yaml ├── prometheus ├── node-exporter.yaml ├── prometheus-cm.yaml ├── prometheus-deploy.yaml ├── prometheus-pv.yaml ├── prometheus-rbac.yaml └── prometheus-svc.yaml ├── pvdemo ├── nfs-pvc-deploy.yaml ├── pv-nfs.yaml ├── pv2-nfs.yaml ├── pvc-nfs.yaml └── pvc2-nfs.yaml ├── pydemo ├── Dockerfile ├── app.py └── docker-compose.yml ├── rbacdemo ├── haimaxy-role.yaml ├── haimaxy-rolebinding.yaml ├── haimaxy-sa-clusterrole.yaml ├── haimaxy-sa-role.yaml └── haimaxy-sa-rolebinding.yaml ├── rcdemo └── rc-demo.yaml ├── scheduler ├── node-affinity-demo.yaml ├── node-selector-demo.yaml ├── pod-affinity-demo.yaml └── pod-antiaffinity-demo.yaml ├── secretdemo ├── secret-demo.yaml ├── secret-pod.yaml ├── secret1-pod.yaml └── secret2-pod.yaml ├── servicedemo └── service-demo.yaml ├── statefulsetdemo ├── pv001.yaml └── statefulset-demo.yaml ├── staticpod └── static-pod.yaml ├── storageclassdemo ├── class.yaml ├── deployment.yaml ├── rbac.yaml ├── test-pod.yaml ├── test-pvc.yaml └── test-statefulset.yaml ├── test.sh ├── traefikdemo ├── example-ingress.yaml ├── example.yaml ├── ingress.yaml ├── rbac.yaml ├── traefik.toml └── traefik.yaml ├── wordpress ├── docker-compose.yml ├── wordpress-all.yaml ├── wordpress-db.yaml ├── wordpress-pod.yaml └── wordpress.yaml └── yaml ├── pod-example.yaml ├── test1.json ├── test1.yaml ├── test2.json ├── test2.yaml ├── test3.json ├── test3.yml ├── test4.json └── test4.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | node_modules/ 3 | _book 4 | *.pdf 5 | *.epub -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:jessie 2 | 3 | RUN buildDeps='gcc libc6-dev make' \ 4 | && apt-get update \ 5 | && apt-get install -y $buildDeps \ 6 | && wget -O redis.tar.gz "http://download.redis.io/releases/redis-3.2.5.tar.gz" \ 7 | && mkdir -p /usr/src/redis \ 8 | && tar -xzf redis.tar.gz -C /usr/src/redis --strip-components=1 \ 9 | && make -C /usr/src/redis \ 10 | && make -C /usr/src/redis install \ 11 | && rm -rf /var/lib/apt/lists/* \ 12 | && rm redis.tar.gz \ 13 | && rm -r /usr/src/redis \ 14 | && apt-get purge -y --auto-remove $buildDeps 15 | 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 从Docker到Kubernetes进阶 2 | 从 Docker 入门一步步迁移到 Kubernetes 的进阶课程 3 | 4 | 在线浏览:[https://www.qikqiak.com/k8s-book](https://www.qikqiak.com/k8s-book/) 5 | 6 | GitHub地址:[https://github.com/cnych/kubernetes-learning/](https://github.com/cnych/kubernetes-learning/) 7 | 8 | 
视频课程在线地址:[https://youdianzhishi.com/course/6n8xd6/](https://youdianzhishi.com/course/6n8xd6/) 9 | 10 | 11 | ## 介绍 12 | 13 | [Kubernetes](http://kubernetes.io/)是Google基于Borg开源的容器编排调度引擎,作为[CNCF](http://cncf.io/)(Cloud Native Computing Foundation)最重要的组件之一,它的目标不仅仅是一个编排系统,而是提供一个规范,可以让你来描述集群的架构,定义服务的最终状态,`Kubernetes` 可以帮你将系统自动地达到和维持在这个状态。`Kubernetes` 作为云原生应用的基石,相当于一个云操作系统,其重要性不言而喻。 14 | 15 | ![从Docker到Kubernetes进阶](http://sdn.haimaxy.com/covers/2018/4/21/c4082e0f09c746aa848279a2567cffed.png) 16 | 17 | 之前一直有同学跟我说我 `Docker` 掌握得还可以,但是不知道怎么使用 `Kubernetes`,网上的其他关于 `Kubernetes` 的课程费用又太高,本书就是为你们准备的,当然如果你不了解 `Docker`,不了解 `Kubernetes`,都没有关系,我们会从 `Docker` 入门一步步深入,到 `Kubernetes` 的进阶使用的。所以大家完全没必要担心。 18 | 19 | 学完本课程以后,你将会对 `Docker` 和 `Kubernetes` 有一个更加深入的认识,我们会讲到: 20 | 21 | * `Docker` 的一些常用方法,当然我们的重点会在 Kubernetes 上面 22 | * 会用 `kubeadm` 来搭建一套 `Kubernetes` 的集群 23 | * 理解 `Kubernetes` 集群的运行原理 24 | * 常用的一些控制器使用方法 25 | * 还有 `Kubernetes` 的一些调度策略 26 | * `Kubernetes`的运维 27 | * 包管理工具 `Helm` 的使用 28 | * 最后我们会实现基于 Kubernetes 的 CI/CD 29 | 30 | 31 | ## 社区&读者交流 32 | 33 | * 博客:[阳明的博客](https://qikqiak.com/) 34 | * 微信群:`k8s`技术圈,扫描我的微信二维码,[阳明](https://qikqiak.com/page/about/),或直接搜索微信号**iEverything**后拉您入群,请增加备注(k8s或kubernetes) 35 | * 知乎专栏:[k8s技术圈](https://zhuanlan.zhihu.com/kube100) 36 | * 开发者头条:[k8s技术圈](https://toutiao.io/subjects/268333) 37 | * 微信公众号:扫描下面的二维码关注微信公众号`k8s技术圈` 38 | 39 | ![k8s公众帐号](./docs/images/k8s-qrcode.png) 40 | 41 | * 优点知识:[优点知识](https://youdianzhishi.com/)是一个综合的技术学习平台,本书配套的视频教程将会发布在该平台上面,感兴趣的朋友可以扫描下发的二维码关注自己感兴趣的课程。 42 | 43 | ![优点知识服务号](./docs/images/ydzs-qrcode.png) 44 | ![优点知识小程序](./docs/images/ydzs-xcx.png) 45 | 46 | -------------------------------------------------------------------------------- /SUMMARY.md: -------------------------------------------------------------------------------- 1 | # 目录 2 | 3 | ### 介绍 4 | * [序言](README.md) 5 | * [课程介绍](docs/1.课程介绍.md) 6 | 7 | ### Docker 基础 8 | * [Docker 简介](docs/2.Docker 简介.md) 9 | * [镜像和容器的基本操作](docs/3.镜像和容器的基本操作.md) 10 | * [Dockerfile 定制镜像](docs/4.Dockerfile 定制镜像.md) 11 | * [私有镜像仓库](docs/5.私有镜像仓库.md) 12 | * [数据共享与持久化](docs/6.数据共享与持久化.md) 13 | * [Docker 的网络模式](docs/7.Docker的网络模式.md) 14 | 15 | ### Docker 三架马车 16 | * [Docker Compose](docs/8.Docker Compose.md) 17 | * [Docker Machine](docs/9.Docker Machine.md) 18 | * [Docker Swarm](docs/10.Docker Swarm.md) 19 | 20 | ### Docker 实践 21 | * [图形化管理和监控](docs/11.图形化管理和监控.md) 22 | * [Docker 的多阶段构建](docs/12.Docker的多阶段构建.md) 23 | * [Dockerfile 最佳实践](docs/13.Dockerfile最佳实践.md) 24 | 25 | ### Kubernetes 基础 26 | * [Kubernetes 初体验](docs/14.Kubernetes初体验.md) 27 | * [基本概念与组件](docs/15.基本概念与组件.md) 28 | 29 | ### kubeadm 搭建集群 30 | * [使用 kubeadm 搭建集群环境](docs/16.用 kubeadm 搭建集群环境.md) 31 | * [安装 Dashboard 插件](docs/17.安装 Dashboard 插件.md) 32 | 33 | ### 深入理解 Pod 34 | * [YAML 文件](docs/18.YAML 文件.md) 35 | * [静态 Pod](docs/19.静态 Pod.md) 36 | * [Pod Hook](docs/20.Pod Hook.md) 37 | * [Pod 的健康检查](docs/21.健康检查.md) 38 | * [初始化容器](docs/22.初始化容器.md) 39 | 40 | ### 常用对象操作: 41 | * [Replication Controller 与 Replica Set](docs/23.使用RC管理Pod.md) 42 | * [Deployment](docs/24.Deployment 使用.md) 43 | * [HPA](docs/25.Pod 水平自动伸缩.md) 44 | * [Job/CronJob](docs/26.Job与Cronjob 的使用.md) 45 | * [Service](docs/27.Service.md) 46 | * [ConfigMap](docs/28.ConfigMap.md) 47 | * [Secret](docs/29.Secret.md) 48 | * [RBAC](docs/30.RBAC.md) 49 | * [部署Wordpress示例](docs/31.部署 Wordpress 示例.md) 50 | * [DaemonSet 和 StatefulSet](docs/32.DaemonSet 与 StatefulSet.md) 51 | 52 | ### 持久化存储: 53 | * [PV](docs/33.PV.md) 54 | * [PVC](docs/34.PVC.md) 55 | * [StorageClass](docs/35.StorageClass.md) 56 | 
57 | ### 服务发现 58 | * [kubedns](docs/39.kubedns.md) 59 | * [ingress 安装配置](docs/40.ingress.md) 60 | * [ingress tls 和 path 的使用](docs/41.ingress config.md) 61 | 62 | ### 包管理工具 Helm 63 | * [Helm 的安装使用](docs/42.Helm安装.md) 64 | * [Helm 的基本使用](docs/43.Helm基本使用.md) 65 | * [Helm 模板之内置函数和Values](docs/44.Helm模板之内置函数和Values.md) 66 | * [Helm 模板之模板函数与管道](docs/45.Helm模板之模板函数与管道.md) 67 | * [Helm 模板之控制流程](docs/46.Helm模板之控制流程.md) 68 | * [Helm 模板之命名模板](docs/47.Helm模板之命名模板.md) 69 | * [Helm 模板之其他注意事项](docs/48.Helm模板之其他注意事项.md) 70 | * [Helm Hooks](docs/49.Helm Hooks.md) 71 | 72 | 73 | ### 调度器 74 | * [Kubernetes 调度器介绍](docs/50.Kubernetes调度策略.md) 75 | * [Kubernetes 亲和性调度](docs/51.Kubernetes亲和性调度.md) 76 | 77 | 78 | ### 集群监控 79 | * [手动安装 Prometheus](docs/52.Prometheus基本使用.md) 80 | * [监控 Kubernetes 集群应用](docs/53.监控Kubernetes集群应用.md) 81 | * [监控 Kubernetes 集群节点](docs/54.监控Kubernetes集群节点.md) 82 | * [监控 Kubernetes 常用资源对象](docs/55.监控Kubernetes常用资源对象.md) 83 | * [Grafana 的安装使用](docs/56.Grafana的安装使用.md) 84 | * [AlertManager 的使用](docs/57.AlertManager的使用.md) 85 | * [Prometheus Operator 的安装](docs/58.Prometheus Operator.md) 86 | * [自定义Prometheus Operator 监控项](docs/59.自定义Prometheus Operator 监控项.md) 87 | * [Prometheus Operator高级配置](docs/60.Prometheus Operator高级配置.md) 88 | 89 | 90 | ### 日志收集 91 | * [日志收集架构](docs/61.日志收集架构.md) 92 | * [搭建 EFK 日志系统](docs/62.搭建 EFK 日志系统.md) 93 | 94 | 95 | ### CI/CD: 96 | * [动态 Jenkins Slave](docs/36.Jenkins Slave.md) 97 | * [Jenkins Pipeline 部署 Kubernetes 应用](docs/37.Jenkins Pipeline.md) 98 | * [Jenkins BlueOcean](docs/38.Jenkins BlueOcean.md) 99 | * [Harbor](docs/63.Harbor.md) 100 | * [Gitlab](docs/64.Gitlab.md) 101 | * [Gitlab CI](docs/65.Gitlab CI.md) 102 | * [Devops](docs/66.devops.md) 103 | 104 | 105 | ### 其他: 106 | * [集群升级](docs/67.Upgrade集群.md) 107 | -------------------------------------------------------------------------------- /book.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "从 Docker 到 Kubernetes 进阶手册", 3 | "description": "从 Docker 入门一步步迁移到 Kubernetes 的进阶课程学习指南", 4 | "language": "zh-hans", 5 | "author": "阳明", 6 | "links": { 7 | "sidebar": { 8 | "阳明的博客": "https://www.qikqiak.com", 9 | "优点知识": "https://youdianzhishi.com", 10 | "我们一起学istio技术": "https://www.qikqiak.com/istio-book/", 11 | "python微服务实战": "https://www.qikqiak.com/tdd-book/" 12 | } 13 | }, 14 | "plugins": [ 15 | "prism", "prism-themes", "-highlight", 16 | "github", 17 | "codesnippet", 18 | "splitter", 19 | "simple-page-toc", 20 | "page-toc-button", 21 | "image-captions", 22 | "back-to-top-button", 23 | "-lunr", "-search", "search-plus", 24 | "github-buttons@2.1.0", 25 | "favicon@^0.0.2", 26 | "tbfed-pagefooter@^0.0.1", 27 | "theme-default", 28 | "sitemap-general", 29 | "3-ba", 30 | "ga", 31 | "adsense" 32 | ], 33 | "pluginsConfig": { 34 | "theme-default": { 35 | "showLevel": true 36 | }, 37 | "prism": { 38 | "css": [ 39 | "prism-themes/themes/prism-hopscotch.css" 40 | ] 41 | }, 42 | "github": { 43 | "url": "https://github.com/cnych/kubernetes-learning" 44 | }, 45 | "simple-page-toc": { 46 | "maxDepth": 3, 47 | "skipFirstH1": true 48 | }, 49 | "image-captions": { 50 | "caption": "_CAPTION_" 51 | }, 52 | "github-buttons": { 53 | "repo": "cnych/kubernetes-learning", 54 | "types": ["star"], 55 | "size": "small" 56 | }, 57 | "sitemap-general": { 58 | "prefix": "https://www.qikqiak.com/k8s-book/" 59 | }, 60 | "tbfed-pagefooter": { 61 | "copyright": "Copyright © qikqiak.com 2018", 62 | "modify_label": "Updated: ", 63 | "modify_format": "YYYY-MM-DD HH:mm:ss" 64 | }, 65 | 
"favicon": { 66 | "shortcut": "favicon.ico", 67 | "bookmark": "favicon.ico" 68 | }, 69 | "3-ba": { 70 | "token": "d611849735f187dd788dc054908f7d7a" 71 | }, 72 | "ga": { 73 | "token": "UA-69668147-3" 74 | }, 75 | "adsense": { 76 | "client": "ca-pub-5376999672787220", 77 | "slot": "3100272140", 78 | "format": "auto", 79 | "element": ".page-inner section", 80 | "position": "bottom" 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /configmapdemo/cm-demo1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cm-demo1 5 | data: 6 | data.1: hello 7 | data.2: world 8 | config: | 9 | property.1=value-1 10 | property.2=value-2 11 | property.3=value-3 -------------------------------------------------------------------------------- /configmapdemo/cm-demo2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: testcm1-pod 5 | spec: 6 | containers: 7 | - name: testcm1 8 | image: busybox 9 | command: ["/bin/sh", "-c", "env"] 10 | env: 11 | - name: DB_HOST 12 | valueFrom: 13 | configMapKeyRef: 14 | name: cm-demo4 15 | key: db.host 16 | - name: DB_PORT 17 | valueFrom: 18 | configMapKeyRef: 19 | name: cm-demo4 20 | key: db.port 21 | envFrom: 22 | - configMapRef: 23 | name: cm-demo2 24 | -------------------------------------------------------------------------------- /configmapdemo/cm-demo3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: testcm2-pod 5 | spec: 6 | containers: 7 | - name: testcm2 8 | image: busybox 9 | command: ["/bin/sh", "-c", "echo $(DB_HOST) $(DB_PORT)"] 10 | env: 11 | - name: DB_HOST 12 | valueFrom: 13 | configMapKeyRef: 14 | name: cm-demo4 15 | key: db.host 16 | - name: DB_PORT 17 | valueFrom: 18 | configMapKeyRef: 19 | name: cm-demo4 20 | key: db.port 21 | -------------------------------------------------------------------------------- /configmapdemo/cm-demo4.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: testcm4-pod 5 | spec: 6 | containers: 7 | - name: testcm4 8 | image: busybox 9 | command: ["/bin/sh", "-c", "cat /etc/config/redis.conf"] 10 | volumeMounts: 11 | - name: config-volume 12 | mountPath: /etc/config 13 | volumes: 14 | - name: config-volume 15 | configMap: 16 | name: cm-demo3 -------------------------------------------------------------------------------- /configmapdemo/cm-demo5.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: testcm5-pod 5 | spec: 6 | containers: 7 | - name: testcm5 8 | image: busybox 9 | command: ["/bin/sh", "-c", "cat /etc/config/path/to/mysql.conf"] 10 | volumeMounts: 11 | - name: config-volume 12 | mountPath: /etc/config 13 | volumes: 14 | - name: config-volume 15 | configMap: 16 | name: cm-demo2 17 | items: 18 | - key: mysql.conf 19 | path: path/to/mysql.conf -------------------------------------------------------------------------------- /daemonsetdemo/daemonset-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: nginx-ds 5 | labels: 6 | app: nginx 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | k8s-app: 
nginx 12 | spec: 13 | containers: 14 | - name: nginx 15 | image: nginx:1.7.9 16 | ports: 17 | - name: htpp 18 | containerPort: 80 19 | -------------------------------------------------------------------------------- /dashboard/admin-acount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: admin 7 | namespace: kube-system 8 | 9 | 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: ClusterRoleBinding 13 | metadata: 14 | name: admin 15 | roleRef: 16 | apiGroup: rbac.authorization.k8s.io 17 | kind: ClusterRole 18 | name: cluster-admin 19 | subjects: 20 | - kind: ServiceAccount 21 | name: admin 22 | namespace: kube-system 23 | -------------------------------------------------------------------------------- /dashboard/mac.token: -------------------------------------------------------------------------------- 1 | eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1wc3BtZyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjczM2M4OTU1LWI4YmYtMTFlOC1iMzc4LTAyNTAwMDAwMDAwMSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.J1Ii8-bSEe86_ORS97Xqe7yNz3ET2x7mfV9VW4bojbOM212Dv8jnpDbP7ZAQYaTXnr5_QkVAWv5Qqqng4OzeR3ykW1u51x6oyH2F_wFGPB2xwgow-2UTLy5dsRrPkiMv3lcPJ34BaVbjojzfLUI0Vvl8DPID_tvuZzlX1AVCgoQutOFAMQgmCnlezspisYjTJVgFD2vd7hVe5jN16r1oNRn9EYK3xAHyum3L9HcMg3cnOPFf8WlhVyAkMeFGG8qNjWtzW6gmXkyaM3NATh7-YdM9hKqkgm2DmvCkkB_NNtchhU_EdwD0xt4VcNQZQHFaYWYxmpmeJPBFPewdL-76mw -------------------------------------------------------------------------------- /dashboard/token: -------------------------------------------------------------------------------- 1 | eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi10b2tlbi1nbXdzcCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImY4ZDI3NjYyLTVhYjEtMTFlOC1hMTAxLTUyNTQwMGRiNGRmNyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbiJ9.bkLMp5QmJ4-VRxQrteA8JEkPtN6H7GZUAuWdxSxWeORpnd3oPSDX7iElH43tWZyaduTO0HHAC1X9fTI7gAVbKZHAlivbyF07wU5a0DwVG-Xw-M_PcnL42UI9FbCC4D8xCWvwWbRS_fPHTy7kkOCa-fZT26VsiT2tG-xTqIp1xaG6i87hK0dmFTB7NPdhRFY7L7Ceu0EBzpVd4zkglyFfJkoGjAQ7ShigYIzHIrf41U07OHtA34w7-8GmilNreGugP-wX1rWvki5afwL8iRDYFBzXKoZNZk25HOrp7--9WsjRRx14zvm0-8rDiXuFzl0yZaM3-dfslb08ysM1SAwMOA -------------------------------------------------------------------------------- /deploymentdemo/deploy-demo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-deploy 6 | namespace: test 7 | labels: 8 | app: nginx-demo 9 | spec: 10 | replicas: 3 11 | revisionHistoryLimit: 15 12 | minReadySeconds: 5 13 | strategy: 14 | type: RollingUpdate 15 | rollingUpdate: 16 | maxSurge: 1 17 | maxUnavailable: 1 18 | template: 19 | metadata: 20 | labels: 21 | app: nginx 22 | spec: 23 | containers: 24 | - name: nginx 25 | 
image: nginx 26 | ports: 27 | - containerPort: 80 28 | name: nginxweb 29 | -------------------------------------------------------------------------------- /deploymentdemo/deploy-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ngxdemo 5 | spec: 6 | selector: 7 | app: app 8 | type: NodePort 9 | ports: 10 | - name: http 11 | protocol: TCP 12 | port: 80 13 | targetPort: nginxweb 14 | -------------------------------------------------------------------------------- /dnsdemo/test-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-api 5 | spec: 6 | containers: 7 | - name: test-api 8 | image: busybox 9 | imagePullPolicy: IfNotPresent 10 | command: ["/bin/sh", "-c", "env"] 11 | dnsPolicy: ClusterFirst 12 | 13 | node01 14 | kube-dns CM 15 | cluster.local kubedns 16 | www.haimaxy.com -> node01 17 | 18 | kube-dns CM 19 | kube-dns ->.cluster.local ->kubedns 20 | -> acme.local -> 1.2.3.4 21 | -> www.haimaxy.com -> [8.8.8.8, 8.8.4.4] 22 | 23 | nginx-service.default.svc.cluster.local 24 | nginx-service.default 25 | nginx-service 26 | 27 | redis-0.redis-service.kube-ops.svc.cluster.local 28 | redis-1.redis-service.kube-ops.svc.cluster.local 29 | 30 | clusterIP -------------------------------------------------------------------------------- /dnsdemo/test-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deploy 5 | labels: 6 | k8s-app: nginx-demo 7 | spec: 8 | replicas: 2 9 | template: 10 | metadata: 11 | labels: 12 | app: nginx 13 | spec: 14 | containers: 15 | - name: nginx 16 | image: nginx:1.7.9 17 | imagePullPolicy: IfNotPresent 18 | ports: 19 | - containerPort: 80 20 | 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: nginx-service 26 | labels: 27 | names: nginx-service 28 | spec: 29 | selector: 30 | app: nginx 31 | ports: 32 | - port: 5000 33 | targetPort: 80 -------------------------------------------------------------------------------- /docs/1.课程介绍.md: -------------------------------------------------------------------------------- 1 | # 1. 课程介绍 2 | 之前一直有同学跟我说我`Docker`掌握得还可以,但是不知道怎么使用`Kubernetes`,网上的其他关于 Kubernetes 的课程费用又太高,这节课就是为你们准备的,当然如果你不了解 Docker,不了解 Kubernetes,都没有关系,我们这个课程会从 Docker 入门一步步深入,到 Kubernetes 的进阶使用的。所以大家完全没必要担心。 3 | ​​![course logo](./images/course-logo.png) 4 | 5 | 学完本课程以后,你将会对 Docker 和 Kubernetes 有一个更加深入的认识,我们会讲到: 6 | 7 | * Docker 的一些常用方法,当然我们的重点会在 Kubernetes 上面 8 | * 会用 kubeadm 来搭建一套 Kubernetes 的集群 9 | * 理解 Kubernetes 集群的运行原理 10 | * 常用的一些控制器使用方法 11 | * 还有 Kubernetes 的一些调度策略 12 | * Kubernetes的运维 13 | * 包管理工具 Helm 的使用 14 | * 最后我们会实现基于 Kubernetes 的 CI/CD 15 | 16 | 17 | --- 18 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 19 | 20 | ![qrcode](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 21 | -------------------------------------------------------------------------------- /docs/11.图形化管理和监控.md: -------------------------------------------------------------------------------- 1 | # 11. 
图形化管理和监控 2 | 下面我们介绍几个可以用图形化的方式来管理`Docker`的工具。 3 | 4 | > Shipyard:https://github.com/shipyard/shipyard(已停止维护) 5 | 6 | ## Portainer 7 | [Portainer](https://portainer.io/)(基于 Go)是一个轻量级的管理界面,可让您轻松管理`Docker`主机或`Swarm`集群。 8 | 9 | `Portainer`的使用意图是简单部署。它包含可以在任何 Docker 引擎上运行的单个容器(Docker for Linux 和 Docker for Windows)。 10 | 11 | `Portainer`允许您管理 Docker 容器、image、volume、network 等。 它与独立的 Docker 引擎和 Docker Swarm 兼容。 12 | 13 | Docker 命令安装: 14 | ```shell 15 | $ docker volume create portainer_data 16 | $ docker run -d -p 9000:9000 -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer 17 | ``` 18 | 19 | Swarm集群部署: 20 | ```shell 21 | $ docker volume create portainer_data 22 | $ docker service create \ 23 | --name portainer \ 24 | --publish 9000:9000 \ 25 | --replicas=1 \ 26 | --constraint 'node.role == manager' \ 27 | --mount type=bind,src=//var/run/docker.sock,dst=/var/run/docker.sock \ 28 | --mount type=volume,src=portainer_data,dst=/data \ 29 | portainer/portainer \ 30 | -H unix:///var/run/docker.sock 31 | ``` 32 | 33 | Docker Compose 部署: 34 | ```yaml 35 | version: '2' 36 | services: 37 | portainer: 38 | image: portainer/portainer 39 | command: -H unix:///var/run/docker.sock 40 | volumes: 41 | - /var/run/docker.sock:/var/run/docker.sock 42 | - portainer_data:/data 43 | volumes: 44 | portainer_data: 45 | ``` 46 | ![portainer](./images/docker-ui-portainer.png) 47 | 48 | ## Rancher 49 | `Rancher`是一个开源的企业级容器管理平台。通过`Rancher`,企业不必自己使用一系列的开源软件去从头搭建容器服务平台。`Rancher`提供了在生产环境中使用管理`Docker`和`Kubernetes`的全栈化容器部署与管理平台。 50 | ​​![rancher](./images/docker-ui-rancher.png) 51 | 52 | 在后面学习`kubernetes`的课程的时候会给大家演示,用于我们快速搭建一个可运行`kubernetes`集群环境,非常方便。 53 | 54 | ## cAdvisor 55 | `cAdvisor`是`Google`开发的容器监控工具,我们来看看 cAdvisor 有什么能耐。 56 | 57 | * 监控 Docker Host 58 | cAdvisor 会显示当前 host 的资源使用情况,包括 CPU、内存、网络、文件系统等。 59 | 60 | * 监控容器 61 | 点击 Docker Containers 链接,显示容器列表。点击某个容器,比如 sysdig,进入该容器的监控页面。 62 | 63 | 以上就是 cAdvisor 的主要功能,总结起来主要两点: 64 | * 展示 Host 和容器两个层次的监控数据。 65 | * 展示历史变化数据。 66 | 67 | 由于`cAdvisor`提供的操作界面略显简陋,而且需要在不同页面之间跳转,并且只能监控一个 host,这不免会让人质疑它的实用性。但 cAdvisor 的一个亮点是它可以将监控到的数据导出给第三方工具,由这些工具进一步加工处理。 68 | 69 | 我们可以把 cAdvisor 定位为一个监控数据收集器,收集和导出数据是它的强项,而非展示数据。 70 | cAdvisor 支持很多第三方工具,其中就包括后面我们重点要学习的`Prometheus`。 71 | ```shell 72 | $ docker run \ 73 | --volume=/:/rootfs:ro \ 74 | --volume=/var/run:/var/run:rw \ 75 | --volume=/sys:/sys:ro \ 76 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 77 | --volume=/dev/disk/:/dev/disk:ro \ 78 | --publish=8080:8080 \ 79 | --detach=true \ 80 | --name=cadvisor \ 81 | google/cadvisor:latest 82 | ``` 83 | 84 | 通过访问地址:http://127.0.0.1:8080/containers/ 可以查看所有容器信息: 85 | ![docker cAdvisor](./images/docker-cadvisor.png) 86 | ​​ 87 | 除此之外,cAdvisor 还提供了一个 Rest API:https://github.com/google/cadvisor/blob/master/docs/api.md 88 | 89 | cAdvisor 通过该 REST API 暴露监控数据,格式如下: 90 | ``` 91 | http://:/api// 92 | ``` 93 | 94 | 95 | 96 | --- 97 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 98 | 99 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 100 | 101 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 102 | -------------------------------------------------------------------------------- /docs/12.Docker的多阶段构建.md: -------------------------------------------------------------------------------- 1 | # 12. 
Docker 的多阶段构建 2 | 3 | `Docker`的口号是 **Build,Ship,and Run Any App,Anywhere**,在我们使用 Docker 的大部分时候,的确能感觉到其优越性,但是往往在我们 Build 一个应用的时候,是将我们的源代码也构建进去的,这对于类似于 golang 这样的编译型语言肯定是不行的,因为实际运行的时候我只需要把最终构建的二进制包给你就行,把源码也一起打包在镜像中,需要承担很多风险,即使是脚本语言,在构建的时候也可能需要使用到一些上线的工具,这样无疑也增大了我们的镜像体积。 4 | 5 | 6 | ## 示例 7 | 比如我们现在有一个最简单的 golang 服务,需要构建一个最小的`Docker` 镜像,源码如下: 8 | ```go 9 | package main 10 | import ( 11 | "github.com/gin-gonic/gin" 12 | "net/http" 13 | ) 14 | func main() { 15 | router := gin.Default() 16 | router.GET("/ping", func(c *gin.Context) { 17 | c.String(http.StatusOK, "PONG") 18 | }) 19 | router.Run(":8080") 20 | } 21 | ``` 22 | 23 | ## 解决方案 24 | 我们最终的目的都是将最终的可执行文件放到一个最小的镜像(比如`alpine`)中去执行,怎样得到最终的编译好的文件呢?基于 `Docker` 的指导思想,我们需要在一个标准的容器中编译,比如在一个 Ubuntu 镜像中先安装编译的环境,然后编译,最后也在该容器中执行即可。 25 | 26 | 但是如果我们想把编译后的文件放置到 `alpine` 镜像中执行呢?我们就得通过上面的 Ubuntu 镜像将编译完成的文件通过 `volume` 挂载到我们的主机上,然后我们再将这个文件挂载到 `alpine` 镜像中去。 27 | 28 | 这种解决方案理论上肯定是可行的,但是这样的话在构建镜像的时候我们就得定义两步了,第一步是先用一个通用的镜像编译镜像,第二步是将编译后的文件复制到 `alpine` 镜像中执行,而且通用镜像编译后的文件在 `alpine` 镜像中不一定能执行。 29 | 30 | 定义编译阶段的 `Dockerfile`:(保存为**Dockerfile.build**) 31 | ```docker 32 | FROM golang 33 | WORKDIR /go/src/app 34 | ADD . /go/src/app 35 | RUN go get -u -v github.com/kardianos/govendor 36 | RUN govendor sync 37 | RUN GOOS=linux GOARCH=386 go build -v -o /go/src/app/app-server 38 | ``` 39 | 40 | 定义`alpine`镜像:(保存为**Dockerfile.old**) 41 | ```docker 42 | FROM alpine:latest 43 | RUN apk add -U tzdata 44 | RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime 45 | WORKDIR /root/ 46 | COPY app-server . 47 | CMD ["./app-server"] 48 | ``` 49 | 50 | 根据我们的执行步骤,我们还可以简单定义成一个脚本:(保存为**build.sh**) 51 | ```shell 52 | #!/bin/sh 53 | echo Building cnych/docker-multi-stage-demo:build 54 | 55 | docker build -t cnych/docker-multi-stage-demo:build . -f Dockerfile.build 56 | 57 | docker create --name extract cnych/docker-multi-stage-demo:build 58 | docker cp extract:/go/src/app/app-server ./app-server 59 | docker rm -f extract 60 | 61 | echo Building cnych/docker-multi-stage-demo:old 62 | 63 | docker build --no-cache -t cnych/docker-multi-stage-demo:old . -f Dockerfile.old 64 | rm ./app-server 65 | ``` 66 | 当我们执行完上面的构建脚本后,就实现了我们的目标。 67 | 68 | ## 多阶段构建 69 | 有没有一种更加简单的方式来实现上面的镜像构建过程呢?**Docker 17.05**版本以后,官方就提供了一个新的特性:`Multi-stage builds`(多阶段构建)。 70 | 使用多阶段构建,你可以在一个 `Dockerfile` 中使用多个 FROM 语句。每个 FROM 指令都可以使用不同的基础镜像,并表示开始一个新的构建阶段。你可以很方便的将一个阶段的文件复制到另外一个阶段,在最终的镜像中保留下你需要的内容即可。 71 | 72 | 我们可以调整前面一节的 `Dockerfile` 来使用多阶段构建:(保存为**Dockerfile**) 73 | ```docker 74 | FROM golang AS build-env 75 | ADD . /go/src/app 76 | WORKDIR /go/src/app 77 | RUN go get -u -v github.com/kardianos/govendor 78 | RUN govendor sync 79 | RUN GOOS=linux GOARCH=386 go build -v -o /go/src/app/app-server 80 | 81 | FROM alpine 82 | RUN apk add -U tzdata 83 | RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime 84 | COPY --from=build-env /go/src/app/app-server /usr/local/bin/app-server 85 | EXPOSE 8080 86 | CMD [ "app-server" ] 87 | ``` 88 | 现在我们只需要一个`Dockerfile`文件即可,也不需要拆分构建脚本了,只需要执行 build 命令即可: 89 | ```shell 90 | $ docker build -t cnych/docker-multi-stage-demo:latest . 
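# 补充一个小技巧(假设示例,镜像 tag 是随意取的):Docker 17.05 及以上版本的 build 命令
# 还支持 --target 参数,可以只构建 Dockerfile 中的某一个阶段,比如只构建上面命名的
# build-env 阶段,方便单独调试编译环境:
$ docker build --target build-env -t cnych/docker-multi-stage-demo:build-env .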
91 | ``` 92 | 93 | 默认情况下,构建阶段是没有命令的,我们可以通过它们的索引来引用它们,第一个 FROM 指令从`0`开始,我们也可以用`AS`指令为阶段命令,比如我们这里的将第一阶段命名为`build-env`,然后在其他阶段需要引用的时候使用`--from=build-env`参数即可。 94 | 95 | 最后我们简单的运行下该容器测试: 96 | ```shell 97 | $ docker run --rm -p 8080:8080 cnych/docker-multi-stage-demo:latest 98 | ``` 99 | 运行成功后,我们可以在浏览器中打开`http://127.0.0.1:8080/ping`地址,可以看到**PONG**返回。 100 | 101 | 现在我们就把两个镜像的文件最终合并到一个镜像里面了。 102 | 103 | 文章中涉及到代码可以前往 github 查看:[https://github.com/cnych/docker-multi-stage-demo](https://github.com/cnych/docker-multi-stage-demo) 104 | 105 | 106 | 107 | --- 108 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 109 | 110 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 111 | 112 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 113 | 114 | -------------------------------------------------------------------------------- /docs/14.Kubernetes初体验.md: -------------------------------------------------------------------------------- 1 | # 14. Kubernetes 初体验 2 | 今天开始正式进入 Kubernetes 的课程学习,Kubernetes 我们已经听过很多了,那么什么是Kubernetes呢? 3 | 4 | ## 简介 5 | Kubernetes 是 Google 团队发起的一个开源项目,它的目标是管理跨多个主机的容器,用于自动部署、扩展和管理容器化的应用程序,主要实现语言为 Go 语言。Kubernetes 的组件和架构还是相对较复杂的,如果我们一上来就给大家讲解这些概念,可能很多同学都消化不了,所以我们先让我们的同学来使用我们的Kubernetes,去感受下,去体验下里面的一些概念和用法,等你对这些基本概念熟悉以后,再来给大家讲解Kubernetes的组件和架构应该就更加容易了。 6 | 7 | 怎样体验呢?当然最好的办法就是我们自己搭建一套集群了,但是如果完完全全都我们手动去搭建的话,第一是太耗时,第二是太复杂,可能我们现在还没有这个实力,没关系,我们可以使用一些工具来辅助我们。 8 | 9 | 1.`katacoda`的课程:[katacoda](https://www.katacoda.com/courses/kubernetes),可以在网站上帮我们启动一个minikube的环境(学习) 10 | ​​![katacoda](./images/k8s-katacoda.png) 11 | 12 | 2.需要我们自己来搭建的 - [`Rancher`](https://rancher.com/),我们之前给大家介绍过,如果你网速不好的话安装 Rancher 可能需要花费一点时间,不过这是值得的。(测试) 13 | ```shell 14 | $ docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher:v2.0.0 # 查看日志 15 | $ docker logs -f rancher 16 | ``` 17 | ![rancher](./images/k8s-rancher.png) 18 | 19 | 3.Docker for MAC/Windows(推荐)/[minikube](https://github.com/kubernetes/minikube)/(本地) 20 | 21 | > Docker for MAC/Windows 和 minikube 安装之前需要安装[kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)工具 22 | 23 | 4.[kubeadm](https://k8s.qikqiak.com/docs/16.%E7%94%A8%20kubeadm%20%E6%90%AD%E5%BB%BA%E9%9B%86%E7%BE%A4%E7%8E%AF%E5%A2%83.html)(测试) 24 | 25 | 5.[二进制纯手动搭建](https://blog.qikqiak.com/post/manual-install-high-available-kubernetes-cluster/)(生产) 26 | 27 | ## 集群 28 | 集群是一组节点,这些节点可以是物理服务器或者虚拟机,在他上面安装了Kubernetes环境。 29 | ​​![k8s cluster](./images/k8s-cluster.png) 30 | 31 | **Master 负责管理集群**, master 协调集群中的所有活动,例如调度应用程序、维护应用程序的所需状态、扩展应用程序和滚动更新。 32 | **节点是 Kubernetes 集群中的工作机器,可以是物理机或虚拟机。**每个工作节点都有一个 kubelet,它是管理节点并与 Kubernetes Master 节点进行通信的代理。节点上还应具有处理容器操作的容器运行时,例如 [Docker](https://www.docker.com/) 或 [rkt](https://coreos.com/rkt/)。一个 Kubernetes 工作集群至少有三个节点。 33 | Master 管理集群,而 节点 用于托管正在运行的应用程序。 34 | 35 | 当您在 Kubernetes 上部署应用程序时,您可以告诉 master 启动应用程序容器。Master 调度容器在集群的节点上运行。 节点使用 Master 公开的 Kubernetes API 与 Master 通信。用户也可以直接使用 Kubernetes 的 API 与集群交互。 36 | 37 | ### Pod 38 | Pod 是一组紧密关联的容器集合,它们共享 PID、IPC、Network 和 UTS namespace,是Kubernetes 调度的基本单位。Pod 的设计理念是支持多个容器在一个 Pod 中共享网络和文件系统,可以通过进程间通信和文件共享这种简单高效的方式组合完成服务。 39 | ![k8s pod](./images/k8s-pod.png) 40 | ​​ 41 | 在 Kubernetes 中,所有对象都使用 manifest(yaml或json)来定义,比如一个简单的 nginx 服务可以定义为 nginx.yaml,它包含一个镜像为 nginx 的容器: 42 | ```yaml 43 | apiVersion: v1 44 | kind: Pod 45 | metadata: 46 | name: nginx 47 | labels: 48 | app: nginx 49 | spec: 50 | containers: 51 | - name: nginx 52 | image: nginx 53 | ports: 54 | - containerPort: 80 55 | ``` 56 | 57 | ### Label 58 | Label 是识别 
Kubernetes 对象的标签,以 key/value 的方式附加到对象上(key最长不能超过63字节,value 可以为空,也可以是不超过253字节的字符串)。 59 | Label 不提供唯一性,并且实际上经常是很多对象(如Pods)都使用相同的 label 来标志具体的应用。 60 | Label 定义好后其他对象可以使用 Label Selector 来选择一组相同 label 的对象(比如Service 用 label 来选择一组 Pod)。Label Selector支持以下几种方式: 61 | 62 | * 等式,如app=nginx和env!=production 63 | * 集合,如env in (production, qa) 64 | * 多个label(它们之间是AND关系),如app=nginx,env=test 65 | 66 | ### Namespace 67 | Namespace 是对一组资源和对象的抽象集合,比如可以用来将系统内部的对象划分为不同的项目组或用户组。常见的 pods, services,deployments 等都是属于某一个 namespace 的(默认是default),而 Node, PersistentVolumes 等则不属于任何 namespace。 68 | 69 | ### Deployment 70 | 是否手动创建 Pod,如果想要创建同一个容器的多份拷贝,需要一个个分别创建出来么,能否将Pods划到逻辑组里? 71 | 72 | Deployment 确保任意时间都有指定数量的 Pod“副本”在运行。如果为某个 Pod 创建了Deployment 并且指定3个副本,它会创建3个 Pod,并且持续监控它们。如果某个 Pod 不响应,那么 Deployment 会替换它,保持总数为3. 73 | 74 | 如果之前不响应的 Pod 恢复了,现在就有4个 Pod 了,那么 Deployment 会将其中一个终止保持总数为3。如果在运行中将副本总数改为5,Deployment 会立刻启动2个新 Pod,保证总数为5。Deployment 还支持回滚和滚动升级。 75 | 76 | 当创建 Deployment 时,需要指定两个东西: 77 | 78 | * Pod模板:用来创建 Pod 副本的模板 79 | * Label标签:Deployment 需要监控的 Pod 的标签。 80 | 81 | 现在已经创建了 Pod 的一些副本,那么在这些副本上如何均衡负载呢?我们需要的是 Service。 82 | 83 | ### Service 84 | Service 是应用服务的抽象,通过 labels 为应用提供负载均衡和服务发现。匹配 labels 的Pod IP 和端口列表组成 endpoints,由 kube-proxy 负责将服务 IP 负载均衡到这些endpoints 上。 85 | 86 | 每个 Service 都会自动分配一个 cluster IP(仅在集群内部可访问的虚拟地址)和 DNS 名,其他容器可以通过该地址或 DNS 来访问服务,而不需要了解后端容器的运行。 87 | ![k8s service](./images/k8s-service.png) 88 | 89 | 90 | 91 | --- 92 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 93 | 94 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 95 | 96 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 97 | -------------------------------------------------------------------------------- /docs/15.基本概念与组件.md: -------------------------------------------------------------------------------- 1 | # 15. 
基本概念与组件 2 | 3 | ## 基本概念 4 | Kubernetes 中的绝大部分概念都抽象成 Kubernetes 管理的一种资源对象,下面我们一起复习一下我们上节课遇到的一些资源对象: 5 | 6 | * Master:Master 节点是 Kubernetes 集群的控制节点,负责整个集群的管理和控制。Master 节点上包含以下组件: 7 | * kube-apiserver:集群控制的入口,提供 HTTP REST 服务 8 | * kube-controller-manager:Kubernetes 集群中所有资源对象的自动化控制中心 9 | * kube-scheduler:负责 Pod 的调度 10 | * Node:Node 节点是 Kubernetes 集群中的工作节点,Node 上的工作负载由 Master 节点分配,工作负载主要是运行容器应用。Node 节点上包含以下组件: 11 | * kubelet:负责 Pod 的创建、启动、监控、重启、销毁等工作,同时与 Master 节点协作,实现集群管理的基本功能。 12 | * kube-proxy:实现 Kubernetes Service 的通信和负载均衡 13 | * 运行容器化(Pod)应用 14 | 15 | * Pod: Pod 是 Kubernetes 最基本的部署调度单元。每个 Pod 可以由一个或多个业务容器和一个根容器(Pause 容器)组成。一个 Pod 表示某个应用的一个实例 16 | * ReplicaSet:是 Pod 副本的抽象,用于解决 Pod 的扩容和伸缩 17 | * Deployment:Deployment 表示部署,在内部使用ReplicaSet 来实现。可以通过 Deployment 来生成相应的 ReplicaSet 完成 Pod 副本的创建 18 | * Service:Service 是 Kubernetes 最重要的资源对象。Kubernetes 中的 Service 对象可以对应微服务架构中的微服务。Service 定义了服务的访问入口,服务的调用者通过这个地址访问 Service 后端的 Pod 副本实例。Service 通过 Label Selector 同后端的 Pod 副本建立关系,Deployment 保证后端Pod 副本的数量,也就是保证服务的伸缩性。 19 | 20 | ![k8s basic](./images/k8s-basic.png) 21 | ​​ 22 | 23 | Kubernetes 主要由以下几个核心组件组成: 24 | * etcd 保存了整个集群的状态,就是一个数据库; 25 | * apiserver 提供了资源操作的唯一入口,并提供认证、授权、访问控制、API 注册和发现等机制; 26 | * controller manager 负责维护集群的状态,比如故障检测、自动扩展、滚动更新等; 27 | * scheduler 负责资源的调度,按照预定的调度策略将 Pod 调度到相应的机器上; 28 | * kubelet 负责维护容器的生命周期,同时也负责 Volume(CSI)和网络(CNI)的管理; 29 | * Container runtime 负责镜像管理以及 Pod 和容器的真正运行(CRI); 30 | * kube-proxy 负责为 Service 提供 cluster 内部的服务发现和负载均衡; 31 | 32 | 当然了除了上面的这些核心组件,还有一些推荐的插件: 33 | * kube-dns 负责为整个集群提供 DNS 服务 34 | * Ingress Controller 为服务提供外网入口 35 | * Heapster 提供资源监控 36 | * Dashboard 提供 GUI 37 | 38 | 39 | ## 组件通信 40 | Kubernetes 多组件之间的通信原理: 41 | 42 | * apiserver 负责 etcd 存储的所有操作,且只有 apiserver 才直接操作 etcd 集群 43 | * apiserver 对内(集群中的其他组件)和对外(用户)提供统一的 REST API,其他组件均通过 apiserver 进行通信 44 | 45 | * controller manager、scheduler、kube-proxy 和 kubelet 等均通过 apiserver watch API 监测资源变化情况,并对资源作相应的操作 46 | * 所有需要更新资源状态的操作均通过 apiserver 的 REST API 进行 47 | 48 | * apiserver 也会直接调用 kubelet API(如 logs, exec, attach 等),默认不校验 kubelet 证书,但可以通过 `--kubelet-certificate-authority` 开启(而 GKE 通过 SSH 隧道保护它们之间的通信) 49 | 50 | 比如最典型的创建 Pod 的流程: 51 | ​​![k8s pod](./images/k8s-pod-process.png) 52 | 53 | * 用户通过 REST API 创建一个 Pod 54 | * apiserver 将其写入 etcd 55 | * scheduluer 检测到未绑定 Node 的 Pod,开始调度并更新 Pod 的 Node 绑定 56 | * kubelet 检测到有新的 Pod 调度过来,通过 container runtime 运行该 Pod 57 | * kubelet 通过 container runtime 取到 Pod 状态,并更新到 apiserver 中 58 | 59 | 60 | 61 | --- 62 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 63 | 64 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 65 | 66 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 67 | -------------------------------------------------------------------------------- /docs/17.安装 Dashboard 插件.md: -------------------------------------------------------------------------------- 1 | # 17. 
安装 Dashboard 插件 2 | Kubernetes Dashboard 是 k8s集群的一个 WEB UI管理工具,代码托管在 github 上,地址:[https://github.com/kubernetes/dashboard](https://github.com/kubernetes/dashboard) 3 | 4 | ## 安装: 5 | 直接使用官方的配置文件安装即可: 6 | ```shell 7 | $ wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml 8 | 9 | ``` 10 | 11 | 为了测试方便,我们将`Service`改成`NodePort`类型,注意 YAML 中最下面的 Service 部分新增一个`type=NodePort`: 12 | ```yaml 13 | kind: Service 14 | apiVersion: v1 15 | metadata: 16 | labels: 17 | k8s-app: kubernetes-dashboard 18 | name: kubernetes-dashboard 19 | namespace: kube-system 20 | spec: 21 | ports: 22 | - port: 443 23 | targetPort: 8443 24 | type: NodePort 25 | selector: 26 | k8s-app: kubernetes-dashboard 27 | ``` 28 | 29 | 然后直接部署新版本的`dashboard`即可: 30 | ```shell 31 | $ kubectl create -f kubernetes-dashboard.yaml 32 | ``` 33 | 34 | 然后我们可以查看 dashboard 的外网访问端口: 35 | ```shell 36 | $ kubectl get svc kubernetes-dashboard -n kube-system 37 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 38 | haproxy ClusterIP 10.254.125.90 8440/TCP,8442/TCP 2d 39 | kubernetes-dashboard NodePort 10.254.122.185 443:31694/TCP 10s 40 | ``` 41 | 42 | 然后直接访问集群中的任何一个节点 IP 加上上面的**31694**端口即可打开 dashboard 页面了 43 | 44 | > 由于 dashboard 默认是自建的 https 证书,该证书是不受浏览器信任的,所以我们需要强制跳转就可以了。 45 | 46 | 默认 dashboard 会跳转到登录页面,我们可以看到 dashboard 提供了`Kubeconfig`和`token`两种登录方式,我们可以直接跳过或者使用本地的`Kubeconfig`文件进行登录,可以看到会跳转到如下页面: 47 | ![dashboard login page](./images/dashboard-login.png) 48 | 49 | 这是由于该用户没有对`default`命名空间的访问权限。 50 | ​​ 51 | ## 身份认证 52 | 登录 dashboard 的时候支持 Kubeconfig 和token 两种认证方式,Kubeconfig 中也依赖token 字段,所以生成token 这一步是必不可少的。 53 | 54 | ### 生成token 55 | 我们创建一个admin用户并授予admin 角色绑定,使用下面的yaml文件创建admin用户并赋予他管理员权限,然后就可以通过token 登陆dashbaord,这种认证方式本质实际上是通过Service Account 的身份认证加上Bearer token请求 API server 的方式实现,参考 [Kubernetes 中的认证](https://kubernetes.io/docs/admin/authentication/)。 56 | ```yaml 57 | kind: ClusterRoleBinding 58 | apiVersion: rbac.authorization.k8s.io/v1beta1 59 | metadata: 60 | name: admin 61 | annotations: 62 | rbac.authorization.kubernetes.io/autoupdate: "true" 63 | roleRef: 64 | kind: ClusterRole 65 | name: cluster-admin 66 | apiGroup: rbac.authorization.k8s.io 67 | subjects: 68 | - kind: ServiceAccount 69 | name: admin 70 | namespace: kube-system 71 | 72 | --- 73 | apiVersion: v1 74 | kind: ServiceAccount 75 | metadata: 76 | name: admin 77 | namespace: kube-system 78 | labels: 79 | kubernetes.io/cluster-service: "true" 80 | addonmanager.kubernetes.io/mode: Reconcile 81 | ``` 82 | 83 | 上面的`admin`用户创建完成后我们就可以获取到该用户对应的`token`了,如下命令: 84 | ```shell 85 | $ kubectl get secret -n kube-system|grep admin-token 86 | admin-token-d5jsg kubernetes.io/service-account-token 3 1d 87 | $ kubectl get secret admin-token-d5jsg -o jsonpath={.data.token} -n kube-system |base64 -d# 会生成一串很长的base64后的字符串 88 | ``` 89 | 90 | 然后在 dashboard 登录页面上直接使用上面得到的 token 字符串即可登录,这样就可以拥有管理员权限操作整个 kubernetes 集群的对象,当然你也可以为你的登录用户新建一个指定操作权限的用户。​​ 91 | ![dashboard](./images/dashboard.png) 92 | 93 | 94 | 95 | --- 96 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 97 | 98 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 99 | 100 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 101 | -------------------------------------------------------------------------------- /docs/19.静态 Pod.md: -------------------------------------------------------------------------------- 1 | # 静态 Pod 2 | 我们上节课给大家讲解了 YAML 文件的使用,也手动的创建了一个简单的 Pod,这节课开始我们就来深入的学习下我们的 Pod。在Kubernetes集群中除了我们经常使用到的普通的 
Pod 外,还有一种特殊的 Pod,叫做`Static Pod`,就是我们说的静态 Pod,静态 Pod 有什么特殊的地方呢? 3 | 4 | 静态 Pod 直接由特定节点上的`kubelet`进程来管理,不通过 master 节点上的`apiserver`。它无法与我们常用的控制器`Deployment`或者`DaemonSet`进行关联,由`kubelet`进程自己来监控,当`pod`崩溃时重启该`pod`,`kubelet`也无法对它们进行健康检查。静态 pod 始终绑定在某一个`kubelet`上,并且始终运行在同一个节点上。 5 | `kubelet`会自动为每一个静态 pod 在 Kubernetes 的 apiserver 上创建一个镜像 Pod(Mirror Pod),因此我们可以在 apiserver 中查询到该 pod,但是不能通过 apiserver 进行控制(例如不能删除)。 6 | 7 | 创建静态 Pod 有两种方式:配置文件和 HTTP。 8 | 9 | ### 配置文件 10 | 配置文件就是放在特定目录下的标准的 JSON 或 YAML 格式的 pod 定义文件。用`kubelet --pod-manifest-path=<指定的目录>`来启动`kubelet`进程,kubelet 会定期扫描这个目录,根据这个目录下出现或消失的 YAML/JSON 文件来创建或删除静态 pod。 11 | 12 | 比如我们在 node01 这个节点上用静态 pod 的方式来启动一个 nginx 的服务。我们登录到 node01 节点上面,可以通过下面的命令找到 kubelet 对应的启动配置文件: 13 | ```shell 14 | $ systemctl status kubelet 15 | ``` 16 | 17 | 配置文件路径为: 18 | ```shell 19 | $ /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 20 | ``` 21 | 22 | 打开这个文件我们可以看到其中有一条如下的环境变量配置: `Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true"` 23 | 24 | 所以如果我们是通过`kubeadm`的方式安装的集群环境,对应的`kubelet`已经配置了静态 Pod 文件的路径,那就是`/etc/kubernetes/manifests`,我们只需要在该目录下面创建一个标准的 Pod 的 JSON 或者 YAML 文件即可: 25 | 26 | 如果你的 kubelet 启动参数中没有配置上面的`--pod-manifest-path`参数的话,那么添加上这个参数然后重启 kubelet 即可。 27 | 28 | ```yaml 29 | [root@node01 ~] $ cat <<EOF >/etc/kubernetes/manifests/static-web.yaml 30 | apiVersion: v1 31 | kind: Pod 32 | metadata: 33 | name: static-web 34 | labels: 35 | app: static 36 | spec: 37 | containers: 38 | - name: web 39 | image: nginx 40 | ports: 41 | - name: web 42 | containerPort: 80 43 | EOF 44 | ``` 45 | 46 | ### 通过 HTTP 创建静态 Pods 47 | kubelet 会周期性地从`--manifest-url=`参数指定的地址下载文件,并且把它翻译成 JSON/YAML 格式的 pod 定义。此后的操作方式与`--pod-manifest-path=`相同,kubelet 会不时地重新下载该文件,当文件变化时对应地终止或启动静态 pod。 48 | 49 | ### 静态 Pod 的动作行为 50 | kubelet 启动时,由`--pod-manifest-path=`或`--manifest-url=`参数指定的目录或地址中定义的所有 pod 都会自动创建,例如,我们示例中的 static-web。(可能要花些时间拉取 nginx 镜像,耐心等待…) 51 | ```shell 52 | [root@node01 ~] $ docker ps 53 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 54 | f6d05272b57e nginx:latest "nginx" 8 minutes ago Up 8 minutes k8s_web.6f802af4_static-web-fk-node1_default_67e24ed9466ba55986d120c867395f3c_378e5f3c 55 | ``` 56 | 57 | 现在我们通过`kubectl`工具可以看到这里创建了一个新的镜像 Pod: 58 | ```shell 59 | [root@node01 ~] $ kubectl get pods 60 | NAME READY STATUS RESTARTS AGE 61 | static-web-my-node01 1/1 Running 0 2m 62 | ``` 63 | 64 | 静态 pod 的标签会传递给镜像 Pod,可以用来过滤或筛选。 需要注意的是,我们不能通过 API 服务器来删除静态 pod(例如,通过[kubectl](https://kubernetes.io/docs/user-guide/kubectl/)命令),kubelet 是不会删除它的。 65 | ```shell 66 | [root@node01 ~] $ kubectl delete pod static-web-my-node01 67 | [root@node01 ~] $ kubectl get pods 68 | NAME READY STATUS RESTARTS AGE 69 | static-web-my-node01 1/1 Running 0 12s 70 | ``` 71 | 72 | 我们尝试手动终止容器,可以看到 kubelet 很快就会自动重启容器。 73 | ```shell 74 | [root@node01 ~] $ docker ps 75 | CONTAINER ID IMAGE COMMAND CREATED ... 76 | 5b920cbaf8b1 nginx:latest "nginx -g 'daemon of 2 seconds ago ... 77 | ``` 78 | 79 | ### 静态 Pod 的动态增加和删除 80 | 运行中的 kubelet 会周期性地扫描配置的目录(我们这个例子中就是 /etc/kubernetes/manifests)下文件的变化,当这个目录中有文件出现或消失时创建或删除 pod。 81 | 82 | ```shell 83 | [root@node01 ~] $ mv /etc/kubernetes/manifests/static-web.yaml /tmp 84 | [root@node01 ~] $ sleep 20 85 | [root@node01 ~] $ docker ps 86 | # no nginx container is running 87 | [root@node01 ~] $ mv /tmp/static-web.yaml /etc/kubernetes/manifests 88 | [root@node01 ~] $ sleep 20 89 | [root@node01 ~] $ docker ps 90 | CONTAINER ID IMAGE COMMAND CREATED ... 
91 | e7a62e3427f1 nginx:latest "nginx -g 'daemon of 27 seconds ago 92 | ``` 93 | 94 | 其实我们用 kubeadm 安装的集群,master 节点上面的几个重要组件都是用静态 Pod 的方式运行的,我们登录到 master 节点上查看`/etc/kubernetes/manifests`目录: 95 | ```shell 96 | [root@master ~]# ls /etc/kubernetes/manifests/ 97 | etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml 98 | ``` 99 | 100 | 现在明白了吧,这种方式也为我们将集群的一些组件容器化提供了可能,因为这些 Pod 都不会受到 apiserver 的控制,不然我们这里`kube-apiserver`怎么自己去控制自己呢?万一不小心把这个 Pod 删掉了呢?所以只能有`kubelet`自己来进行控制,这就是我们所说的静态 Pod。 101 | 102 | 103 | 104 | --- 105 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 106 | 107 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 108 | 109 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 110 | -------------------------------------------------------------------------------- /docs/2.Docker 简介.md: -------------------------------------------------------------------------------- 1 | # 2. Docker 简介 2 | 3 | ## 什么是 Docker? 4 | `Docker`的英文翻译是“搬运工”的意思,他搬运的东西就是我们常说的集装箱`Container`,Container 里面装的是任意类型的 App,我们的开发人员可以通过 Docker 将App 变成一种标准化的、可移植的、自管理的组件,我们可以在任何主流的操作系统中开发、调试和运行。 5 | 6 | 从概念上来看 Docker 和我们传统的虚拟机比较类似,只是更加轻量级,更加方便使,Docker 和虚拟机最主要的区别有以下几点: 7 | 8 | * 虚拟化技术依赖的是物理CPU和内存,是硬件级别的;而我们的 Docker 是构建在操作系统层面的,利用操作系统的容器化技术,所以 Docker 同样的可以运行在虚拟机上面。 9 | * 我们知道虚拟机中的系统就是我们常说的操作系统镜像,比较复杂;而 Docker 比较轻量级,我们可以用 Docker 部署一个独立的 Redis,就类似于在虚拟机当中安装一个 Redis 应用,但是我们用 Docker 部署的应用是完全隔离的。 10 | * 我们都知道传统的虚拟化技术是通过快照来保存状态的;而 Docker 引入了类似于源码管理的机制,将容器的快照历史版本一一记录下来,切换成本非常之低。 11 | * 传统虚拟化技术在构建系统的时候非常复杂;而 Docker 可以通过一个简单的 Dockerfile 文件来构建整个容器,更重要的是 Dockerfile 可以手动编写,这样应用程序开发人员可以通过发布 Dockerfile 来定义应用的环境和依赖,这样对于持续交付非常有利。 12 | ![what-is-docker](./images/docker-what.png) 13 | ​​​​ 14 | 15 | ## 为啥要用容器? 16 | 应用容器是个啥样子呢,一个做好的应用容器长得就像一个装好了一组特定应用的虚拟机一样,比如我现在想用 Redis,那我就找个装好了 Redis 的容器就可以了,然后运行起来,我就能直接使用了。 17 | 18 | 那为什么不能直接安装一个 Redis 呢?肯定是可行的,但是有的时候根据每个人电脑的不同,在安装的时候可能会报出各种各样的错误,万一你的机器中毒了,你的电脑挂了,你所有的服务都需要重新安装。但是有了 Docker 或者说有了容器就不一样了,你就相当于有了一个可以运行起来的虚拟机,只要你能运行容器,Redis 的配置就省了。而且如果你想换个电脑,没问题,很简单,直接把容器”端过来”就可以使用容器里面的服务了。 19 | 20 | ## Docker Engine 21 | `Docker Engine`是一个**C/S**架构的应用程序,主要包含下面几个组件: 22 | 23 | * 常驻后台进程`Dockerd` 24 | * 一个用来和 Dockerd 交互的 REST API Server 25 | * 命令行`CLI`接口,通过和 REST API 进行交互(我们经常使用的 docker 命令) 26 | 27 | ![docker engine](./images/docker-engine.png) 28 | ​​ 29 | 30 | ## Docker 架构 31 | Docker 使用 C/S (客户端/服务器)体系的架构,Docker 客户端与 Docker 守护进程通信,Docker 守护进程负责构建,运行和分发 Docker 容器。Docker 客户端和守护进程可以在同一个系统上运行,也可以将 Docker 客户端连接到远程 Docker 守护进程。Docker 客户端和守护进程使用 REST API 通过`UNIX`套接字或网络接口进行通信。 32 | ![docker structrue](./images/docker-structrue.png) 33 | ​​ 34 | * Docker Damon:dockerd,用来监听 Docker API 的请求和管理 Docker 对象,比如镜像、容器、网络和 Volume。 35 | * Docker Client:docker,docker client 是我们和 Docker 进行交互的最主要的方式方法,比如我们可以通过 docker run 命令来运行一个容器,然后我们的这个 client 会把命令发送给上面的 Dockerd,让他来做真正事情。 36 | * Docker Registry:用来存储 Docker 镜像的仓库,Docker Hub 是 Docker 官方提供的一个公共仓库,而且 Docker 默认也是从 Docker Hub 上查找镜像的,当然你也可以很方便的运行一个私有仓库,当我们使用 docker pull 或者 docker run 命令时,就会从我们配置的 Docker 镜像仓库中去拉取镜像,使用 docker push 命令时,会将我们构建的镜像推送到对应的镜像仓库中。 37 | * Images:镜像,镜像是一个只读模板,带有创建 Docker 容器的说明,一般来说的,镜像会基于另外的一些基础镜像并加上一些额外的自定义功能。比如,你可以构建一个基于 Centos 的镜像,然后在这个基础镜像上面安装一个 Nginx 服务器,这样就可以构成一个属于我们自己的镜像了。 38 | * Containers:容器,容器是一个镜像的可运行的实例,可以使用 Docker REST API 或者 CLI 来操作容器,容器的实质是进程,但与直接在宿主执行的进程不同,容器进程运行于属于自己的独立的[命名空间](https://en.wikipedia.org/wiki/Linux_namespaces)。因此容器可以拥有自己的 **root 文件系统、自己的网络配置、自己的进程空间,甚至自己的用户 ID 空间**。容器内的进程是运行在一个隔离的环境里,使用起来,就好像是在一个独立于宿主的系统下操作一样。这种特性使得容器封装的应用比直接在宿主运行更加安全。 39 | * 
底层技术支持:Namespaces(做隔离)、CGroups(做资源限制)、UnionFS(镜像和容器的分层) the-underlying-technology Docker 底层架构分析 40 | 41 | 42 | ## 安装 43 | 直接前往[官方文档](https://docs.docker.com/install/)选择合适的平台安装即可,比如我们这里想要在`centos`系统上安装 Docker,这前往地址[https://docs.docker.com/install/linux/docker-ce/centos/](https://docs.docker.com/install/linux/docker-ce/centos/)根据提示安装即可。 44 | 45 | 安装依赖软件包: 46 | ```shell 47 | $ sudo yum install -y yum-utils device-mapper-persistent-data lvm2 48 | ``` 49 | 50 | 添加软件仓库,我们这里使用稳定版 Docker,执行下面命令添加 yum 仓库地址: 51 | ```shell 52 | $ sudo yum-config-manager \ 53 | --add-repo \ 54 | https://download.docker.com/linux/centos/docker-ce.repo 55 | ``` 56 | 57 | 然后直接安装即可: 58 | ```shell 59 | $ sudo yum install docker-ce 60 | ``` 61 | 62 | 如果要安装指定的版本,可以使用 yum list 列出可用的版本: 63 | ```shell 64 | $ yum list docker-ce --showduplicates | sort -r 65 | docker-ce.x86_64 18.03.0.ce-1.el7.centos docker-ce-stable 66 | ``` 67 | 68 | 比如这里可以安装**18.03.0.ce**版本: 69 | ```shell 70 | $ sudo yum install docker-ce-18.03.0.ce 71 | ``` 72 | 73 | 要启动 Docker 也非常简单: 74 | ```shell 75 | $ sudo systemctl enable docker 76 | $ sudo systemctl start docker 77 | ``` 78 | 79 | 另外一种安装方式是可以直接下载指定的软件包直接安装即可,前往地址:[https://download.docker.com/linux/centos/7/x86_64/stable/Packages/](https://download.docker.com/linux/centos/7/x86_64/stable/Packages/) 找到合适的`.rpm`包下载,然后安装即可: 80 | ```shell 81 | $ sudo yum install /path/to/package.rpm 82 | ``` 83 | 84 | 85 | --- 86 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 87 | 88 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 89 | 90 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 91 | -------------------------------------------------------------------------------- /docs/20.Pod Hook.md: -------------------------------------------------------------------------------- 1 | # Pod Hook 2 | 3 | 我们知道`Pod`是`Kubernetes`集群中的最小单元,而 Pod 是有容器组组成的,所以在讨论 Pod 的生命周期的时候我们可以先来讨论下容器的生命周期。 4 | 5 | 实际上 Kubernetes 为我们的容器提供了生命周期钩子的,就是我们说的`Pod Hook`,Pod Hook 是由 kubelet 发起的,当容器中的进程启动前或者容器中的进程终止之前运行,这是包含在容器的生命周期之中。我们可以同时为 Pod 中的所有容器都配置 hook。 6 | 7 | Kubernetes 为我们提供了两种钩子函数: 8 | 9 | * PostStart:这个钩子在容器创建后立即执行。但是,并不能保证钩子将在容器`ENTRYPOINT`之前运行,因为没有参数传递给处理程序。主要用于资源部署、环境准备等。不过需要注意的是如果钩子花费太长时间以至于不能运行或者挂起, 容器将不能达到`running`状态。 10 | * PreStop:这个钩子在容器终止之前立即被调用。它是阻塞的,意味着它是同步的, 所以它必须在删除容器的调用发出之前完成。主要用于优雅关闭应用程序、通知其他系统等。如果钩子在执行期间挂起, Pod阶段将停留在`running`状态并且永不会达到`failed`状态。 11 | 12 | 如果`PostStart`或者`PreStop`钩子失败, 它会杀死容器。所以我们应该让钩子函数尽可能的轻量。当然有些情况下,长时间运行命令是合理的, 比如在停止容器之前预先保存状态。 13 | 14 | 另外我们有两种方式来实现上面的钩子函数: 15 | 16 | * Exec - 用于执行一段特定的命令,不过要注意的是该命令消耗的资源会被计入容器。 17 | * HTTP - 对容器上的特定的端点执行`HTTP`请求。 18 | 19 | 20 | ### 示例1 环境准备 21 | 以下示例中,定义了一个Nginx Pod,其中设置了`PostStart`钩子函数,即在容器创建成功后,写入一句话到`/usr/share/message`文件中。 22 | ```yaml 23 | apiVersion: v1 24 | kind: Pod 25 | metadata: 26 | name: hook-demo1 27 | spec: 28 | containers: 29 | - name: hook-demo1 30 | image: nginx 31 | lifecycle: 32 | postStart: 33 | exec: 34 | command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] 35 | ``` 36 | 37 | ### 示例2 优雅删除资源对象 38 | 当用户请求删除含有 pod 的资源对象时(如Deployment等),K8S 为了让应用程序优雅关闭(即让应用程序完成正在处理的请求后,再关闭软件),K8S提供两种信息通知: 39 | 40 | * 默认:K8S 通知 node 执行`docker stop`命令,docker 会先向容器中`PID`为1的进程发送系统信号`SIGTERM`,然后等待容器中的应用程序终止执行,如果等待时间达到设定的超时时间,或者默认超时时间(30s),会继续发送`SIGKILL`的系统信号强行 kill 掉进程。 41 | * 使用 pod 生命周期(利用`PreStop`回调函数),它执行在发送终止信号之前。 42 | 43 | 默认所有的优雅退出时间都在30秒内。kubectl delete 命令支持 `--grace-period=`选项,这个选项允许用户用他们自己指定的值覆盖默认值。值'0'代表 强制删除 pod. 
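(比如,假设我们想给上面示例中的 hook-demo2 这个 Pod 指定 5 秒的优雅退出时间,可以执行 `kubectl delete pod hook-demo2 --grace-period=5`,这里只是沿用上面示例名称的一个演示命令。)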
在 kubectl 1.5 及以上的版本里,执行强制删除时必须同时指定 `--force --grace-period=0`。 44 | 45 | 强制删除一个 pod 是从集群状态还有 etcd 里立刻删除这个 pod。 当 Pod 被强制删除时, api 服务器不会等待来自 Pod 所在节点上的 kubelet 的确认信息:pod 已经被终止。在 API 里 pod 会被立刻删除,在节点上, pods 被设置成立刻终止后,在强行杀掉前还会有一个很小的宽限期。 46 | 47 | 以下示例中,定义了一个Nginx Pod,其中设置了`PreStop`钩子函数,即在容器退出之前,优雅的关闭 Nginx: 48 | ```yaml 49 | apiVersion: v1 50 | kind: Pod 51 | metadata: 52 | name: hook-demo2 53 | spec: 54 | containers: 55 | - name: hook-demo2 56 | image: nginx 57 | lifecycle: 58 | preStop: 59 | exec: 60 | command: ["/usr/sbin/nginx","-s","quit"] 61 | 62 | --- 63 | apiVersion: v1 64 | kind: Pod 65 | metadata: 66 | name: hook-demo2 67 | labels: 68 | app: hook 69 | spec: 70 | containers: 71 | - name: hook-demo2 72 | image: nginx 73 | ports: 74 | - name: webport 75 | containerPort: 80 76 | volumeMounts: 77 | - name: message 78 | mountPath: /usr/share/ 79 | lifecycle: 80 | preStop: 81 | exec: 82 | command: ['/bin/sh', '-c', 'echo Hello from the preStop Handler > /usr/share/message'] 83 | volumes: 84 | - name: message 85 | hostPath: 86 | path: /tmp 87 | ``` 88 | 89 | 另外`Hook`调用的日志没有暴露个给 Pod 的 event,所以只能通过`describe`命令来获取,如果有错误将可以看到`FailedPostStartHook`或`FailedPreStopHook`这样的 event。 90 | 91 | 92 | 93 | --- 94 | [点击查看本文视频](https://youdianzhishi.com/course/6n8xd6/) 95 | 96 | 扫描下面的二维码(或微信搜索`k8s技术圈`)关注我们的微信公众帐号,在微信公众帐号中回复 **加群** 即可加入到我们的 kubernetes 讨论群里面共同学习。 97 | 98 | ![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg) 99 | -------------------------------------------------------------------------------- /docs/22.初始化容器.md: -------------------------------------------------------------------------------- 1 | ## 22. 初始化容器 2 | 3 | 上节课我们学习了容器的健康检查的两个探针:`liveness probe`(存活探针)和`readiness probe`(可读性探针)的使用方法,我们说在这两个探针是可以影响容器的生命周期的,包括我们之前提到的容器的两个钩子函数`PostStart`和`PreStop`。我们今天要给大家介绍的是`Init Container`(初始化容器)。 4 | 5 | `Init Container`就是用来做初始化工作的容器,可以是一个或者多个,如果有多个的话,这些容器会按定义的顺序依次执行,只有所有的`Init Container`执行完后,主容器才会被启动。我们知道一个`Pod`里面的所有容器是共享数据卷和网络命名空间的,所以`Init Container`里面产生的数据可以被主容器使用到的。 6 | 7 | 是不是感觉`Init Container`和之前的钩子函数有点类似啊,只是是在容器执行前来做一些工作,是吧?从直观的角度看上去的话,初始化容器的确有点像`PreStart`,但是钩子函数和我们的`Init Container`是处在不同的阶段的,我们可以通过下面的图来了解下: 8 | ![loap](./images/loap.jpg) 9 | 10 | 从上面这张图我们可以直观的看到`PostStart`和`PreStop`包括`liveness`和`readiness`是属于主容器的生命周期范围内的,而`Init Container`是独立于主容器之外的,当然他们都属于`Pod`的生命周期范畴之内的,现在我们应该明白`Init Container`和钩子函数之类的区别了吧。 11 | 12 | 另外我们可以看到上面我们的`Pod`右边还有一个`infra`的容器,这是一个什么容器呢?我们可以在集群环境中去查看下人任意一个`Pod`对应的运行的`Docker`容器,我们可以发现每一个`Pod`下面都包含了一个`pause-amd64`的镜像,这个就是我们的`infra`镜像,我们知道`Pod`下面的所有容器是共享同一个网络命名空间的,这个镜像就是来做这个事情的,所以每一个`Pod`当中都会包含一个这个镜像。 13 | 14 | > 很多同学最开始 Pod 启动不起来就是因为这个 infra 镜像没有被拉下来,因为默认该镜像是需要到谷歌服务器上拉取的,所以需要提前拉取到节点上面。 15 | 16 | 我们说`Init Container`主要是来做初始化容器工作的,那么他有哪些应用场景呢? 17 | 18 | * 等待其他模块Ready:这个可以用来解决服务之间的依赖问题,比如我们有一个 Web 服务,该服务又依赖于另外一个数据库服务,但是在我们启动这个 Web 服务的时候我们并不能保证依赖的这个数据库服务就已经启动起来了,所以可能会出现一段时间内 Web 服务连接数据库异常。要解决这个问题的话我们就可以在 Web 服务的 Pod 中使用一个 InitContainer,在这个初始化容器中去检查数据库是否已经准备好了,准备好了过后初始化容器就结束退出,然后我们的主容器 Web 服务被启动起来,这个时候去连接数据库就不会有问题了。 19 | * 做初始化配置:比如集群里检测所有已经存在的成员节点,为主容器准备好集群的配置信息,这样主容器起来后就能用这个配置信息加入集群。 20 | * 其它场景:如将 pod 注册到一个中央数据库、配置中心等。 21 | 22 | 23 | 我们先来给大家演示下服务依赖的场景下初始化容器的使用方法,如下`Pod`的定义方法 24 | ```yaml 25 | apiVersion: v1 26 | kind: Pod 27 | metadata: 28 | name: init-pod1 29 | labels: 30 | app: init 31 | spec: 32 | containers: 33 | - name: init-container 34 | image: busybox 35 | command: ['sh', '-c', 'echo The app is running! 
  initContainers:
  - name: init-myservice
    image: busybox
    command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
  - name: init-mydb
    image: busybox
    command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;']
```

The corresponding `Service` `YAML`:
```yaml
kind: Service
apiVersion: v1
metadata:
  name: myservice
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 6376
---
kind: Service
apiVersion: v1
metadata:
  name: mydb
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 6377
```

We can create the `Pod` above first and check its status, then create the `Service`s below and compare the status before and after.
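Illustratively, assuming the manifests are saved as `initpod1.yaml` and `initservice.yaml` (the names used in this repo's `initcontainer` directory; exact ages will differ), the progression looks something like this:
```shell
$ kubectl create -f initpod1.yaml
$ kubectl get pod init-pod1
NAME        READY     STATUS     RESTARTS   AGE
init-pod1   0/1       Init:0/2   0          1m        # blocked in the first init container
$ kubectl create -f initservice.yaml
$ kubectl get pod init-pod1
NAME        READY     STATUS    RESTARTS   AGE
init-pod1   1/1       Running   0          3m
```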
During `Pod` startup, the init containers start in order, after the network and the data volumes have been initialized. Each container must exit successfully before the next one can start. If a container fails to start because of the runtime, or exits with failure, it is retried according to the policy specified in the `Pod`'s `restartPolicy`; note, however, that if the Pod's restartPolicy is set to Always, its init containers effectively use the OnFailure policy.

A `Pod` does not become `Ready` until all of its init containers have succeeded. A `Pod` that is still initializing stays in the `Pending` state, and its `Initialized` condition is only set to true once initialization completes.


Next, let's try creating a `Pod` that does initialization-configuration work:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: init-demo
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
    volumeMounts:
    - name: workdir
      mountPath: /usr/share/nginx/html
  initContainers:
  - name: install
    image: busybox
    command:
    - wget
    - "-O"
    - "/work-dir/index.html"
    - http://www.baidu.com
    volumeMounts:
    - name: workdir
      mountPath: "/work-dir"
  volumes:
  - name: workdir
    emptyDir: {}
```

Notice that `volumes` appears here again. `spec.volumes` declares the volumes of the `Pod`, and `spec.containers.volumeMounts` mounts a declared volume at a specified path inside the container, equivalent to docker's `-v host-dir:container-dir`. We used `hostPath` earlier; here we use `emptyDir: {}`, which acts as a shared volume: a temporary directory whose lifecycle equals that of the `Pod`.

When the init container finishes, it has downloaded an html file into the emptyDir volume, and since the main container is mapped to the same emptyDir in spec.volumes, the `index.html` file shows up under the nginx container's `/usr/share/nginx/html` directory.


Let's create this `Pod` and verify that the nginx container is running:
```shell
$ kubectl get pod init-demo
```
The output shows the nginx container running:
```shell
NAME        READY     STATUS    RESTARTS   AGE
init-demo   1/1       Running   0          43m
```
Open a shell into the nginx container of the init-demo Pod:
```shell
$ kubectl exec -it init-demo -- /bin/bash
```
In the shell, check the contents of index.html directly:

```shell
root@nginx:~# cat /usr/share/nginx/html/index.html
```

If we see **Baidu**-related content, the initialization work above has succeeded.

This is how init containers are used. With this we have covered the main stages of the `Pod` lifecycle: the two container hooks, `PostStart` and `PreStop`; the two health-check probes, `liveness probe` and `readiness probe`; and this lesson's `Init Container`. Starting next lesson we will look at the common controllers and how they combine with `Pod`s.


---
[Watch the video for this lesson](https://youdianzhishi.com/course/6n8xd6/)

Scan the QR code below (or search `k8s技术圈` on WeChat) to follow our official WeChat account, and reply **加群** there to join our Kubernetes discussion group.

![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg)
-------------------------------------------------------------------------------- /docs/23.使用RC管理Pod.md: --------------------------------------------------------------------------------
## Managing Pods with Replication Controller and Replica Set

In earlier lessons we learned the basics of using `Pod`s, and we always operated on `Pod`s directly. Now imagine a `Pod` serving live traffic, and consider some scenarios we might run into:

* A marketing campaign is a huge success and site traffic suddenly spikes
* The node running the `Pod` fails, and the `Pod` can no longer serve requests

The first case is relatively easy to handle: before a campaign we estimate the expected traffic, start a few extra `Pod`s in advance, and kill the surplus once it ends. A bit tedious, but manageable.

In the second case, we might be woken at night by a flood of alerts saying the service is down, get up, open a laptop, and start a new `Pod` on another node. Problem solved, by hand.

If we solve all of these problems manually, it feels like going back to the days of farming with stone tools. It would be far better to have a tool that manages `Pod`s for us: automatically adding a `Pod` when we are short one, automatically restarting a dead `Pod` on a suitable node. With that, none of the problems above would need manual intervention.

Fortunately, `Kubernetes` provides exactly such resource objects:

* Replication Controller: used to deploy and upgrade `Pod`s
* Replica Set: the next generation of `Replication Controller`
* Deployment: a more convenient way to manage `Pod`s and `Replica Set`s


### Replication Controller (RC)
`Replication Controller`, `RC` for short, is one of the core concepts in `Kubernetes`. Simply put, an `RC` guarantees that a specified number of `Pod` replicas are running at any given time, keeping the `Pod`s always available. If there are more actual `Pod`s than specified, it terminates the surplus; if there are fewer, it starts new ones. When `Pod`s fail, get deleted, or die, the `RC` automatically creates new `Pod`s to maintain the replica count, so even with a single `Pod` we should use an `RC` to manage it.

Back to our scenarios: with an `RC`, everything except fully automating the first case stops being a worry. If the node running a `Pod` dies, the `RC` detects the `Pod` failure and starts a replacement on a suitable node, with no manual work. For the first case, we give the `Pod` 10 replicas before the campaign and change it back to 2 afterwards, which beats starting and stopping Pods by hand. Later on we will also introduce another resource object, `HPA`, which scales automatically based on resource usage; with that in place, we really can sleep soundly.

Now let's manage the `Nginx` `Pod` we used earlier with an `RC`. The `YAML` file is as follows:
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: rc-demo
  labels:
    name: rc
spec:
  replicas: 3
  selector:
    name: rc
  template:
    metadata:
      labels:
        name: rc
    spec:
      containers:
      - name: nginx-demo
        image: nginx
        ports:
        - containerPort: 80
```

Compared with our earlier `Pod` format:

* kind: `ReplicationController`
* spec.replicas: the desired number of `Pod` replicas, default 1
* spec.selector: the labels by which the `RC` selects the `Pod`s it controls
* spec.template: the `Pod` definition template from before, minus `apiVersion` and `kind`
* spec.template.metadata.labels: note that these `Pod` `labels` must match `spec.selector`, so the `RC` can control this `Pod`

This `YAML` file defines an `RC` resource object named `rc-demo` that guarantees 3 `Pod`s are always running, using the `nginx` image.

> Note: the `spec.selector` and `spec.template.metadata.labels` fields must match, or creation fails. You may also omit `spec.selector`, in which case it defaults to the `metadata.labels` of the Pod template; to avoid unnecessary mistakes, omitting it is the safer choice.

Then create the `RC` object above (saved as rc-demo.yaml):
```shell
$ kubectl create -f rc-demo.yaml
```

View the `RC`:
```shell
$ kubectl get rc
```

View the details:
```shell
$ kubectl describe rc rc-demo
```

Then change the `Pod` replica count to 2 through the `RC`:
```shell
$ kubectl apply -f rc-demo.yaml
```
or
```shell
$ kubectl edit rc rc-demo
```

We can also use the `RC` for a rolling upgrade, for example switching the image to `nginx:1.7.9`:
```shell
$ kubectl rolling-update rc-demo --image=nginx:1.7.9
```
If the `Pod` contains multiple containers, though, the update must be done by modifying the `YAML` file:
```shell
$ kubectl rolling-update rc-demo -f rc-demo.yaml
```
If new problems appear after the upgrade and you want to roll back to the previous version in one step, an `RC` cannot do that: you can only swap the image back to the previous one in the same way and run the rolling update again.


### Replica Set (RS)
`Replica Set`, `RS` for short. As `Kubernetes` has developed rapidly, the official recommendation is now to use `RS` and `Deployment` in place of `RC`. Functionally `RS` and `RC` are essentially the same; the one current difference is that `RC` only supports equality-based `selector`s (env=dev or environment!=qa), while `RS` also supports set-based `selector`s (version in (v1.0, v2.0)), which is very convenient for complex operational management.

Most `kubectl` commands that apply to `RC`s also apply to our `RS` resource objects. We rarely use an `RS` on its own, however: it is mainly used by the higher-level `Deployment` resource object, unless you need custom upgrade behavior or do not need to upgrade `Pod`s at all. In general, we recommend using a `Deployment` rather than a `Replica Set` directly.

Finally, a summary of the characteristics and uses of `RC`/`RS`:

* In most cases, defining an `RC` is enough to create `Pod`s and control their replica count
* An `RC` contains a complete `Pod` definition template (without `apiversion` and `kind`)
* An `RC` controls `Pod` replicas through the `label selector` mechanism
* Changing the `Pod` replica count in the `RC` scales the `Pod`s out or in
* Changing the image version in the `RC`'s `Pod` template performs a rolling upgrade of the `Pod`s (but one-step rollback is not supported; you roll back by changing the image address back the same way)
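One more convenience not shown above: kubectl also has a dedicated `scale` subcommand, so changing the replica count does not require editing the YAML at all:
```shell
# scale out for the campaign, then back in afterwards
$ kubectl scale rc rc-demo --replicas=10
$ kubectl scale rc rc-demo --replicas=2
```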
Good. This lesson introduced managing our `Pod`s with an `RC` or `RS`; next lesson we introduce a more advanced, and nowadays recommended, resource object: `Deployment`.



---
[Watch the video for this lesson](https://youdianzhishi.com/course/6n8xd6/)

Scan the QR code below (or search `k8s技术圈` on WeChat) to follow our official WeChat account, and reply **加群** there to join our Kubernetes discussion group.

![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg)
-------------------------------------------------------------------------------- /docs/26.Job与Cronjob 的使用.md: --------------------------------------------------------------------------------
# Using Job and CronJob

Last lesson we learned how to auto-scale `Pod`s using the `HPA` resource object, which we will meet again later in the course. Today we introduce another class of resource objects: Jobs. In day-to-day work we often need to do batch data processing and analysis, as well as run tasks on a schedule; the `Kubernetes` cluster provides the `Job` and `CronJob` resource objects for exactly these needs.

A `Job` handles a task that runs only once, and it guarantees that one or more `Pod`s of the batch task end successfully. A `CronJob` is a `Job` with time-based scheduling added on top.


## Job

Let's use the `Job` resource object to create a task: a `Job` that runs a countdown, defined in the following `YAML`:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: job-demo
spec:
  template:
    metadata:
      name: job-demo
    spec:
      restartPolicy: Never
      containers:
      - name: counter
        image: busybox
        command:
        - "/bin/sh"
        - "-c"
        - "for i in 9 8 7 6 5 4 3 2 1; do echo $i; done"
```

Note that a `Job`'s `RestartPolicy` only supports `Never` and `OnFailure`, not `Always`. A `Job` amounts to running a batch task to completion; if `Always` were supported, wouldn't it loop forever?

Then create the `Job` (saved as `job-demo.yaml`):
```shell
$ kubectl create -f job-demo.yaml
job "job-demo" created
```

Then we can view the current `Job` resource objects:
```shell
$ kubectl get jobs
```

Watch the `Pod`'s status; likewise, we can check the task's output with `kubectl logs`.
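For example (an illustrative session; the `job-name` label used in the selector is added to the pods automatically by the Job controller):
```shell
$ pods=$(kubectl get pods --selector=job-name=job-demo --output=jsonpath={.items..metadata.name})
$ kubectl logs $pods
9
8
7
6
5
4
3
2
1
```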
## CronJob

A `CronJob` is really just a `Job` with time scheduling added: we can run a task at a given point in time, or periodically at given times. This is very similar to `crontab` on `Linux`.

A `CronJob` object corresponds to one line of a `crontab` file: it periodically runs a `Job` according to the configured schedule, whose format is the same as `crontab`'s.

The `crontab` format is:

> **minute hour day-of-month month day-of-week command**
Column 1: minute, 0~59
Column 2: hour, 0~23
Column 3: day of month, 1~31
Column 4: month, 1~12
Column 5: day of week, 0~7 (0 and 7 both mean Sunday)
Column 6: the command to run


Now let's manage the Job above with a `CronJob`:

```yaml
apiVersion: batch/v2alpha1
kind: CronJob
metadata:
  name: cronjob-demo
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: hello
            image: busybox
            args:
            - "/bin/sh"
            - "-c"
            - "for i in 9 8 7 6 5 4 3 2 1; do echo $i; done"
```

Here the `Kind` is `CronJob`. Note that the `.spec.schedule` field is required; it specifies the task's schedule, in the same format as `crontab`. The other key field is `.spec.jobTemplate`, which specifies the task to run, in the same format as a `Job`. Also worth noting are `.spec.successfulJobsHistoryLimit` and `.spec.failedJobsHistoryLimit`, optional fields for history limits. They specify how many completed and failed `Job`s may be kept. By default there is no limit and all successful and failed `Job`s are retained; however, Jobs can pile up quickly when running a `Cron Job`, so setting these two fields is generally recommended. If a limit is set to 0, no `Job`s of that kind are kept after completion.

Next, create this `cronjob`:
```shell
$ kubectl create -f cronjob-demo.yaml
cronjob "cronjob-demo" created
```

Of course, a `CronJob` can also be created with `kubectl run`:
```shell
$ kubectl run hello --schedule="*/1 * * * *" --restart=OnFailure --image=busybox -- /bin/sh -c "date; echo Hello from the Kubernetes cluster"
$ kubectl get cronjob
NAME      SCHEDULE      SUSPEND   ACTIVE    LAST-SCHEDULE
hello     */1 * * * *   False     0
$ kubectl get jobs
NAME               DESIRED   SUCCESSFUL   AGE
hello-1202039034   1         1            49s
$ pods=$(kubectl get pods --selector=job-name=hello-1202039034 --output=jsonpath={.items..metadata.name} -a)
$ kubectl logs $pods
Mon Aug 29 21:34:09 UTC 2016
Hello from the Kubernetes cluster
```

Once the Cron Job is no longer needed, it can simply be deleted with kubectl:
```shell
$ kubectl delete cronjob hello
cronjob "hello" deleted
```
This stops new Jobs from being created. Running Jobs, however, are not terminated, and neither the Jobs nor their Pods are deleted. To clean them up, list all the Jobs the Cron Job created, then delete them:
```
$ kubectl get jobs
NAME               DESIRED   SUCCESSFUL   AGE
hello-1201907962   1         1            11m
hello-1202039034   1         1            8m
...

$ kubectl delete jobs hello-1201907962 hello-1202039034 ...
job "hello-1201907962" deleted
job "hello-1202039034" deleted
...
```
Once a Job is deleted, the Pods created by the Job are deleted too. Note that all Jobs created by the Cron Job named "hello" are named with the "hello-" prefix. To delete every Job in the current Namespace, use kubectl delete jobs --all to remove them at once.



---
[Watch the video for this lesson](https://youdianzhishi.com/course/6n8xd6/)

Scan the QR code below (or search `k8s技术圈` on WeChat) to follow our official WeChat account, and reply **加群** there to join our Kubernetes discussion group.

![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg)
-------------------------------------------------------------------------------- /docs/6.数据共享与持久化.md: --------------------------------------------------------------------------------
# 6. Data Sharing and Persistence
This section covers managing data inside Docker and between containers. There are two main ways to manage data in containers:

* Data volumes (Data Volumes)
* Bind-mounting host directories (Bind mounts)

## Data Volumes
A `data volume` is a special directory usable by one or more containers. It bypasses the `UFS` and provides several useful properties:

* Data volumes can be shared and reused between containers
* Changes to a data volume take effect immediately
* Updating a data volume does not affect the image
* A data volume persists by default, even after the container is deleted

> Note: using a data volume is similar to mounting a directory or file under Linux: the files in the image directory designated as the mount point are hidden, and what you see is the mounted data volume.

Choosing between the -v and --mount flags:
New Docker users should pick the `--mount` flag. Experienced Docker users are already familiar with `-v` or `--volume`, but `--mount` is recommended regardless.

Create a data volume:
```shell
$ docker volume create my-vol
```

List all data volumes:
```shell
$ docker volume ls
DRIVER              VOLUME NAME
local               my-vol
```

On the host, inspect a specific data volume with:
```shell
$ docker volume inspect my-vol
[
    {
        "Driver": "local",
        "Labels": {},
        "Mountpoint": "/var/lib/docker/volumes/my-vol/_data",
        "Name": "my-vol",
        "Options": {},
        "Scope": "local"
    }
]
```

Start a container with a data volume mounted: when using `docker run`, pass the `--mount` flag to mount data volumes into the container; a single `docker run` can mount several. The following creates a container named web and mounts a data volume at the container's /webapp directory:
```shell
$ docker run -d -P \
    --name web \
    # -v my-vol:/webapp \
    --mount source=my-vol,target=/webapp \
    training/webapp \
    python app.py
```

View the volume details: on the host, inspect the web container with
```shell
$ docker inspect web
...
"Mounts": [
    {
        "Type": "volume",
        "Name": "my-vol",
        "Source": "/var/lib/docker/volumes/my-vol/_data",
        "Destination": "/webapp",
        "Driver": "local",
        "Mode": "",
        "RW": true,
        "Propagation": ""
    }
],
...
```
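If you only want the mount information, a Go-template filter trims the output down (a small convenience on top of the command above):
```shell
$ docker inspect -f '{{ json .Mounts }}' web
```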
Delete a data volume:
```
$ docker volume rm my-vol
```

Data volumes are designed to persist data, and their lifecycle is independent of containers. Docker does not automatically delete data volumes when a container is deleted, and there is no garbage-collection mechanism for data volumes no longer referenced by any container. To remove the data volume together with its container, use the `docker rm -v` command when deleting the container.
Orphaned data volumes can take up a lot of space; to clean them up, use:
```
$ docker volume prune
```

## Mounting Host Directories
Choosing between the -v and --mount flags:
New Docker users should pick the --mount flag. Experienced Docker users are already familiar with -v or --volume, but --mount is recommended regardless.

Mount a host directory as a data volume: the `--mount` flag can mount a local host directory into a container.
```shell
$ docker run -d -P \
    --name web \
    # -v /src/webapp:/opt/webapp \
    --mount type=bind,source=/src/webapp,target=/opt/webapp \
    training/webapp \
    python app.py
```
The command above loads the host's /src/webapp directory into the container at /opt/webapp. This is very convenient for testing, e.g. placing some programs in a local directory to check whether the container works properly. The local directory path must be absolute. Previously, with -v, Docker automatically created a missing local directory for you; now, with --mount, a missing local directory is an error.

Docker mounts host directories read-write by default; you can also add `readonly` to make the mount read-only.
```shell
$ docker run -d -P \
    --name web \
    # -v /src/webapp:/opt/webapp:ro \
    --mount type=bind,source=/src/webapp,target=/opt/webapp,readonly \
    training/webapp \
    python app.py
```

With `readonly` added, the mount is read-only. If you create a file under /opt/webapp inside the container, you get an error like this:
```shell
/opt/webapp # touch new.txt
touch: new.txt: Read-only file system
```

View the volume details: on the host, inspect the web container with
```shell
$ docker inspect web
...
"Mounts": [
    {
        "Type": "bind",
        "Source": "/src/webapp",
        "Destination": "/opt/webapp",
        "Mode": "",
        "RW": true,
        "Propagation": "rprivate"
    }
],
```

Mount a single host file as a data volume: the `--mount` flag can also mount a single file from the host into a container
```shell
$ docker run --rm -it \
    # -v $HOME/.bash_history:/root/.bash_history \
    --mount type=bind,source=$HOME/.bash_history,target=/root/.bash_history \
    ubuntu:17.10 \
    bash

root@2affd44b4667:/# history
1  ls
2  diskutil list
```

This way, the commands you type in the container are recorded in your shell history.
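To see the error mentioned earlier when a `--mount` source path does not exist, try something like this (illustrative; the exact message varies by Docker version):
```shell
$ docker run --rm -it \
    --mount type=bind,source=/src/does-not-exist,target=/opt/webapp \
    ubuntu:17.10 \
    bash
docker: Error response from daemon: invalid mount config for type "bind": bind source path does not exist.
```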


---
[Watch the video for this lesson](https://youdianzhishi.com/course/6n8xd6/)

Scan the QR code below (or search `k8s技术圈` on WeChat) to follow our official WeChat account, and reply **加群** there to join our Kubernetes discussion group.

![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg)
-------------------------------------------------------------------------------- /docs/7.Docker的网络模式.md: --------------------------------------------------------------------------------
# 7. Docker Network Modes

## Bridge mode
When the `Docker` daemon starts, it creates a virtual bridge named `docker0` on the host, and the `Docker` containers started on this host connect to that virtual bridge. A virtual bridge works like a physical switch, so all containers on the host are joined into one layer-2 network through the switch. Each container is allocated an IP from the `docker0` subnet, and docker0's IP address is set as the container's **default gateway**. Docker creates a pair of virtual interfaces, a `veth pair`, on the host: one end is placed inside the newly created container and named `eth0` (the container's NIC), the other end stays on the host with a name like `vethxxx` and is added to the docker0 bridge. You can inspect this with the `brctl show` command.

`bridge` mode is docker's default network mode; omitting the `--net` flag gives you `bridge` mode. When you use `docker run -p`, docker actually installs `DNAT` rules in `iptables` to implement port forwarding; you can check with `iptables -t nat -vnL`. `bridge` mode looks like this:
​​![bridge network](./images/docker-netework-bridge.jpeg)

Demo:
```shell
$ docker run -tid --net=bridge --name docker_bri1 \
    ubuntu-base:v3
$ docker run -tid --net=bridge --name docker_bri2 \
    ubuntu-base:v3

$ brctl show
$ docker exec -ti docker_bri1 /bin/bash
$ ifconfig -a
$ route -n
```

If you have prior Docker experience, you may already be used to connecting containers with the `--link` flag.

As Docker networking has matured, we strongly recommend joining containers to a user-defined Docker network to connect multiple containers, rather than using the --link flag.

First, create a new Docker network:
```shell
$ docker network create -d bridge my-net
```

The `-d` flag specifies the Docker network type, e.g. `bridge` or `overlay`; the overlay network type is for Swarm mode, which you can ignore in this section.

Run a container attached to the newly created my-net network:
```shell
$ docker run -it --rm --name busybox1 --network my-net busybox sh
```

Open a new terminal and run a second container joined to the my-net network:
```shell
$ docker run -it --rm --name busybox2 --network my-net busybox sh
```

Open yet another terminal to view the container info:
```shell
$ docker container ls

CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
b47060aca56b        busybox             "sh"                11 minutes ago      Up 11 minutes                           busybox2
8720575823ec        busybox             "sh"                16 minutes ago      Up 16 minutes                           busybox1
```

Now let's prove that the busybox1 and busybox2 containers are interconnected, using ping.
In the busybox1 container, enter:
```shell
/ # ping busybox2
PING busybox2 (172.19.0.3): 56 data bytes
64 bytes from 172.19.0.3: seq=0 ttl=64 time=0.072 ms
64 bytes from 172.19.0.3: seq=1 ttl=64 time=0.118 ms
```

Pinging the busybox2 container resolves it to 172.19.0.3. Likewise, running ping busybox1 in the busybox2 container also connects successfully:
```shell
/ # ping busybox1
PING busybox1 (172.19.0.2): 56 data bytes
64 bytes from 172.19.0.2: seq=0 ttl=64 time=0.064 ms
64 bytes from 172.19.0.2: seq=1 ttl=64 time=0.143 ms
```
So the busybox1 and busybox2 containers are interconnected.

If you have many containers that need to talk to each other, `Docker Compose` is the recommended tool.

## Host mode
If a container is started with `host` mode, it does not get its own independent `Network Namespace`; it shares one with the host. The container does not virtualize its own NIC or configure its own IP; it uses the host's IP and ports. Other aspects of the container, such as the filesystem and process list, remain isolated from the host.
Host mode looks like this:

​​![network host](./images/docker-network-host.jpeg)

Demo:
```shell
$ docker run -tid --net=host --name docker_host1 ubuntu-base:v3
$ docker run -tid --net=host --name docker_host2 ubuntu-base:v3

$ docker exec -ti docker_host1 /bin/bash
$ docker exec -ti docker_host2 /bin/bash

$ ifconfig -a
$ route -n
```

## Container mode
This mode makes a newly created container share a Network Namespace with an existing container, rather than with the host. The new container does not create its own NIC or configure its own IP; it shares the IP, port range, etc. of the specified container. Apart from networking, the two containers remain isolated in everything else, such as the filesystem and process list. Processes in the two containers can communicate through the lo device.
Container mode looks like this:
![network container](./images/docker-network-container.jpeg)
​​
Demo:
```shell
$ docker run -tid --net=container:docker_bri1 \
    --name docker_con1 ubuntu-base:v3

$ docker exec -ti docker_con1 /bin/bash
$ docker exec -ti docker_bri1 /bin/bash

$ ifconfig -a
$ route -n
```
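A quick way to convince yourself the namespace is shared (illustrative, using the containers above):
```shell
$ docker exec docker_bri1 ifconfig eth0
$ docker exec docker_con1 ifconfig eth0    # identical IP and MAC: one shared network namespace
```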

## None mode
With `none` mode, a Docker container has its own Network Namespace, but Docker performs no network configuration for it at all. In other words, the container has no NIC, IP, or routes; we have to add a NIC, configure an IP, and so on ourselves.
None mode looks like this:
![network none](./images/docker-network-none.jpeg)
​​
Demo:
```shell
$ docker run -tid --net=none --name \
    docker_non1 ubuntu-base:v3

$ docker exec -ti docker_non1 /bin/bash

$ ifconfig -a
$ route -n
```

We will hold off on Docker cross-host networking for now; it comes up later in the `Kubernetes` lessons.



---
[Watch the video for this lesson](https://youdianzhishi.com/course/6n8xd6/)

Scan the QR code below (or search `k8s技术圈` on WeChat) to follow our official WeChat account, and reply **加群** there to join our Kubernetes discussion group.

![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg)
-------------------------------------------------------------------------------- /docs/9.Docker Machine.md: --------------------------------------------------------------------------------
# 9. Docker Machine
[`Docker Machine`](https://docs.docker.com/machine/overview/) is one of `Docker`'s official orchestration projects, responsible for quickly installing Docker environments on a variety of platforms.

The `Docker Machine` project is implemented in `Go` and is currently maintained on [Github](https://github.com/docker/machine).

`Docker Machine` is an official Docker tool that can help us install Docker on remote machines, or create virtual machines directly on a VM host and install Docker inside them. We can also manage these virtual machines and their Docker daemons with the `docker-machine` command.

This chapter covers installing and using Docker Machine.

## Installation
Docker Machine can be installed on several operating-system platforms, including Linux, macOS, and Windows.

### macOS, Windows
Docker for Mac and Docker for Windows ship with the docker-machine binary, ready to use once they are installed. Check the version info:
```shell
$ docker-machine -v
docker-machine version 0.13.0, build 9ba6da9
```

### Linux
Installation on Linux is just as simple: download the prebuilt binary directly from the [official GitHub Releases](https://github.com/docker/machine/releases).
For example, on a 64-bit Linux system, download the matching binary directly:
```shell
$ sudo curl -L https://github.com/docker/machine/releases/download/v0.13.0/docker-machine-`uname -s`-`uname -m` > /usr/local/bin/docker-machine
$ sudo chmod +x /usr/local/bin/docker-machine
```

Afterwards, check the version info:
```shell
$ docker-machine -v
docker-machine version 0.13.0, build 9ba6da9
```

## Usage
Docker Machine supports a variety of backend drivers, including virtual machines, local hosts, and cloud platforms.

### Creating a local machine instance with the Virtualbox driver
Use the `virtualbox` driver to create a Docker host named test:
```shell
$ docker-machine create -d virtualbox test
```

You can also pass flags at creation time to configure the host or the Docker daemon on it:

* **--engine-opt dns=114.114.114.114** sets Docker's default DNS
* **--engine-registry-mirror https://registry.docker-cn.com** sets a Docker registry mirror
* **--virtualbox-memory 2048** sets the host's memory
* **--virtualbox-cpu-count 2** sets the host's CPU count

See `docker-machine create --driver virtualbox --help` for more flags.

The `generic` driver can likewise take over an existing host over SSH:
```shell
$ docker-machine create -d generic \
    --generic-ip-address=123.59.188.19 \
    --generic-ssh-user=root \
    --generic-ssh-key ~/.ssh/id_rsa \
    dev
```
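Once a machine such as `dev` exists, the usual next step (the `env` subcommand is covered again below) is to point your local docker client at it:
```shell
$ eval $(docker-machine env dev)
$ docker info    # now talks to the Docker daemon on the dev machine
```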
### The macOS xhyve driver
The `xhyve` driver's GitHub page: https://github.com/zchee/docker-machine-driver-xhyve. `xhyve` is a lightweight virtualization engine on `macOS`; Docker Machines created with it run more efficiently than those created with the VirtualBox driver.
```shell
$ brew install docker-machine-driver-xhyve
......
$ docker-machine create \
    -d xhyve \
    # --xhyve-boot2docker-url ~/.docker/machine/cache/boot2docker.iso \
    --engine-opt dns=114.114.114.114 \
    --engine-registry-mirror https://registry.docker-cn.com \
    --xhyve-memory-size 2048 \
    --xhyve-rawdisk \
    --xhyve-cpu-count 2 \
    xhyve
```

> Note: after the first creation, it is recommended to add the **--xhyve-boot2docker-url ~/.docker/machine/cache/boot2docker.iso** flag to avoid downloading the ISO image from GitHub on every creation.

See `docker-machine create --driver xhyve --help` for more flags.

### Windows 10
Once `Docker for Windows` is installed on `Windows 10`, `VirtualBox` can no longer be installed, so the virtualbox driver cannot be used to create Docker Machines; we can use the hyperv driver instead.
```shell
$ docker-machine create --driver hyperv vm
```

See `docker-machine create --driver hyperv --help` for more flags.


## Basic usage
After creating hosts, list them:
```shell
$ docker-machine ls

NAME   ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER        ERRORS
test   -        virtualbox   Running   tcp://192.168.99.187:2376           v17.10.0-ce
```

After a host is created, the `env` command makes subsequent operations target that host:
```shell
$ docker-machine env test
```

Follow the printed instructions in your shell, and subsequent commands operate on the test host. You can also log in to the host over `SSH`:
```shell
$ docker-machine ssh test

docker@test:~$ docker --version
Docker version 17.10.0-ce, build f4ffd25
```

Once connected to the host, you can use Docker on it.

### Officially supported drivers
Select a supported driver type with the `-d` flag:

* amazonec2
* azure
* digitalocean
* exoscale
* generic
* google
* hyperv
* none
* openstack
* rackspace
* softlayer
* virtualbox
* vmwarevcloudair
* vmwarefusion
* vmwarevsphere

### Commands
* active: show the active Docker host
* config: print the connection configuration
* create: create a Docker host
* env: print the environment variables needed to connect to a host
* inspect: print detailed information about a host
* ip: get a host's address
* kill: stop a host forcibly
* ls: list all managed hosts
* provision: re-provision an existing host
* regenerate-certs: regenerate TLS certificates for a host
* restart: restart a host
* rm: remove a host
* ssh: SSH into a host to run commands
* scp: copy files between hosts
* mount: mount a host directory locally
* start: start a host
* status: show a host's status
* stop: stop a host
* upgrade: upgrade a host's Docker to the latest version
* url: get a host's URL
* version: print the docker-machine version info
* help: print help

Each command takes its own flags; view the specific usage with:
```shell
$ docker-machine COMMAND --help
```



---
[Watch the video for this lesson](https://youdianzhishi.com/course/6n8xd6/)

Scan the QR code below (or search `k8s技术圈` on WeChat) to follow our official WeChat account, and reply **加群** there to join our Kubernetes discussion group.

![k8s技术圈二维码](https://www.qikqiak.com/img/posts/qrcode_for_gh_d6dd87b6ceb4_430.jpg)
-------------------------------------------------------------------------------- /docs/images/access-modes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/access-modes.png -------------------------------------------------------------------------------- /docs/images/alertmanager-dingtalk-message.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/alertmanager-dingtalk-message.png -------------------------------------------------------------------------------- /docs/images/blue-demo1.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo1.png -------------------------------------------------------------------------------- /docs/images/blue-demo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo2.png -------------------------------------------------------------------------------- /docs/images/blue-demo3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo3.png -------------------------------------------------------------------------------- /docs/images/blue-demo4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo4.png -------------------------------------------------------------------------------- /docs/images/blue-demo5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo5.png -------------------------------------------------------------------------------- /docs/images/blue-demo6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo6.png -------------------------------------------------------------------------------- /docs/images/blue-demo7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/blue-demo7.png -------------------------------------------------------------------------------- /docs/images/course-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/course-logo.png -------------------------------------------------------------------------------- /docs/images/dashboard-login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/dashboard-login.png -------------------------------------------------------------------------------- /docs/images/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/dashboard.png -------------------------------------------------------------------------------- /docs/images/deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/deployment.png -------------------------------------------------------------------------------- /docs/images/dns.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/dns.png -------------------------------------------------------------------------------- /docs/images/docker-cadvisor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-cadvisor.png -------------------------------------------------------------------------------- /docs/images/docker-engine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-engine.png -------------------------------------------------------------------------------- /docs/images/docker-netework-bridge.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-netework-bridge.jpeg -------------------------------------------------------------------------------- /docs/images/docker-network-container.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-network-container.jpeg -------------------------------------------------------------------------------- /docs/images/docker-network-host.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-network-host.jpeg -------------------------------------------------------------------------------- /docs/images/docker-network-none.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-network-none.jpeg -------------------------------------------------------------------------------- /docs/images/docker-structrue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-structrue.png -------------------------------------------------------------------------------- /docs/images/docker-swarm-structrue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-swarm-structrue.png -------------------------------------------------------------------------------- /docs/images/docker-swarm-task-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-swarm-task-service.png -------------------------------------------------------------------------------- /docs/images/docker-ui-portainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-ui-portainer.png 
-------------------------------------------------------------------------------- /docs/images/docker-ui-rancher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-ui-rancher.png -------------------------------------------------------------------------------- /docs/images/docker-what.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/docker-what.png -------------------------------------------------------------------------------- /docs/images/grafana-add-dingtalk-robot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-add-dingtalk-robot.png -------------------------------------------------------------------------------- /docs/images/grafana-alert-dingtalk-robot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-alert-dingtalk-robot.png -------------------------------------------------------------------------------- /docs/images/grafana-alert-email.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-alert-email.png -------------------------------------------------------------------------------- /docs/images/grafana-cluster-table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-cluster-table.png -------------------------------------------------------------------------------- /docs/images/grafana-cpu-usage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-cpu-usage.png -------------------------------------------------------------------------------- /docs/images/grafana-dashboard-add.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-dashboard-add.png -------------------------------------------------------------------------------- /docs/images/grafana-dashboard-edit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-dashboard-edit.png -------------------------------------------------------------------------------- /docs/images/grafana-dashboard-edit2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-dashboard-edit2.png -------------------------------------------------------------------------------- /docs/images/grafana-dashboard-import.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-dashboard-import.png -------------------------------------------------------------------------------- /docs/images/grafana-dashboard-import2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-dashboard-import2.png -------------------------------------------------------------------------------- /docs/images/grafana-dingtalk-alert2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-dingtalk-alert2.png -------------------------------------------------------------------------------- /docs/images/grafana-email-alert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-email-alert.png -------------------------------------------------------------------------------- /docs/images/grafana-email-alert2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-email-alert2.png -------------------------------------------------------------------------------- /docs/images/grafana-graph-alert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-graph-alert.png -------------------------------------------------------------------------------- /docs/images/grafana-graph-notify.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-graph-notify.png -------------------------------------------------------------------------------- /docs/images/grafana-index.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-index.png -------------------------------------------------------------------------------- /docs/images/grafana-k8s-cluster-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-k8s-cluster-dashboard.png -------------------------------------------------------------------------------- /docs/images/grafana-k8s-monitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-k8s-monitor.png -------------------------------------------------------------------------------- /docs/images/grafana-k8s-plugin-cluster.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-k8s-plugin-cluster.png -------------------------------------------------------------------------------- /docs/images/grafana-k8s-plugin-config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-k8s-plugin-config.png -------------------------------------------------------------------------------- /docs/images/grafana-k8s-plugin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-k8s-plugin.png -------------------------------------------------------------------------------- /docs/images/grafana-k8s-plugin2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-k8s-plugin2.png -------------------------------------------------------------------------------- /docs/images/grafana-login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-login.png -------------------------------------------------------------------------------- /docs/images/grafana-prometheus-ds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-prometheus-ds.png -------------------------------------------------------------------------------- /docs/images/grafana-table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/grafana-table.png -------------------------------------------------------------------------------- /docs/images/helm-demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/helm-demo1.png -------------------------------------------------------------------------------- /docs/images/helm-structrure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/helm-structrure.png -------------------------------------------------------------------------------- /docs/images/ingress-config1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ingress-config1.png -------------------------------------------------------------------------------- /docs/images/ingress-config2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ingress-config2.png -------------------------------------------------------------------------------- 
/docs/images/ingress-config3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ingress-config3.png -------------------------------------------------------------------------------- /docs/images/ingress-config4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ingress-config4.png -------------------------------------------------------------------------------- /docs/images/jenkins-demo1-config.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-demo1-config.jpeg -------------------------------------------------------------------------------- /docs/images/jenkins-demo1-config2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-demo1-config2.jpeg -------------------------------------------------------------------------------- /docs/images/jenkins-demo1-config3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-demo1-config3.jpeg -------------------------------------------------------------------------------- /docs/images/jenkins-demo1-config4.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-demo1-config4.jpeg -------------------------------------------------------------------------------- /docs/images/jenkins-demo1-config5.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-demo1-config5.jpeg -------------------------------------------------------------------------------- /docs/images/jenkins-demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-demo1.png -------------------------------------------------------------------------------- /docs/images/jenkins-k8s-config1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-k8s-config1.jpg -------------------------------------------------------------------------------- /docs/images/jenkins-k8s-config2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-k8s-config2.png -------------------------------------------------------------------------------- /docs/images/jenkins-k8s-config3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-k8s-config3.png -------------------------------------------------------------------------------- /docs/images/jenkins-k8s-config4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/jenkins-k8s-config4.png -------------------------------------------------------------------------------- /docs/images/k8s-basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-basic.png -------------------------------------------------------------------------------- /docs/images/k8s-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-cluster.png -------------------------------------------------------------------------------- /docs/images/k8s-jenkins-slave.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-jenkins-slave.png -------------------------------------------------------------------------------- /docs/images/k8s-katacoda.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-katacoda.png -------------------------------------------------------------------------------- /docs/images/k8s-pod-process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-pod-process.png -------------------------------------------------------------------------------- /docs/images/k8s-pod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-pod.png -------------------------------------------------------------------------------- /docs/images/k8s-qrcode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-qrcode.png -------------------------------------------------------------------------------- /docs/images/k8s-rancher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-rancher.png -------------------------------------------------------------------------------- /docs/images/k8s-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-service.png -------------------------------------------------------------------------------- /docs/images/k8s-structure.jpeg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/k8s-structure.jpeg -------------------------------------------------------------------------------- /docs/images/kube-scheduler-detail.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/kube-scheduler-detail.png -------------------------------------------------------------------------------- /docs/images/kube-scheduler-filter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/kube-scheduler-filter.jpg -------------------------------------------------------------------------------- /docs/images/kube-scheduler-structrue.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/kube-scheduler-structrue.jpg -------------------------------------------------------------------------------- /docs/images/kubeadm-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/kubeadm-dashboard.png -------------------------------------------------------------------------------- /docs/images/kubedns.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/kubedns.jpg -------------------------------------------------------------------------------- /docs/images/kubernetes_monitoring_heapster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/kubernetes_monitoring_heapster.png -------------------------------------------------------------------------------- /docs/images/loap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/loap.jpg -------------------------------------------------------------------------------- /docs/images/nginx-consul.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/nginx-consul.png -------------------------------------------------------------------------------- /docs/images/ngx200.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ngx200.png -------------------------------------------------------------------------------- /docs/images/ngx403.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ngx403.png 
-------------------------------------------------------------------------------- /docs/images/pipeline-demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo1.png -------------------------------------------------------------------------------- /docs/images/pipeline-demo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo2.png -------------------------------------------------------------------------------- /docs/images/pipeline-demo3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo3.png -------------------------------------------------------------------------------- /docs/images/pipeline-demo4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo4.png -------------------------------------------------------------------------------- /docs/images/pipeline-demo5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo5.png -------------------------------------------------------------------------------- /docs/images/pipeline-demo6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo6.png -------------------------------------------------------------------------------- /docs/images/pipeline-demo7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/pipeline-demo7.png -------------------------------------------------------------------------------- /docs/images/prometheus-alert-email.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-alert-email.png -------------------------------------------------------------------------------- /docs/images/prometheus-alertmanager-silence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-alertmanager-silence.png -------------------------------------------------------------------------------- /docs/images/prometheus-alertmanager-webui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-alertmanager-webui.png -------------------------------------------------------------------------------- /docs/images/prometheus-alerts.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-alerts.png -------------------------------------------------------------------------------- /docs/images/prometheus-alerts2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-alerts2.png -------------------------------------------------------------------------------- /docs/images/prometheus-apiserver-request.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-apiserver-request.png -------------------------------------------------------------------------------- /docs/images/prometheus-apiserver.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-apiserver.png -------------------------------------------------------------------------------- /docs/images/prometheus-apiserver2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-apiserver2.png -------------------------------------------------------------------------------- /docs/images/prometheus-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-architecture.png -------------------------------------------------------------------------------- /docs/images/prometheus-cadvisor-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-cadvisor-graph.png -------------------------------------------------------------------------------- /docs/images/prometheus-cadvisor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-cadvisor.png -------------------------------------------------------------------------------- /docs/images/prometheus-dashboard-targets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-dashboard-targets.png -------------------------------------------------------------------------------- /docs/images/prometheus-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-menu.png -------------------------------------------------------------------------------- /docs/images/prometheus-metrics-graph.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-metrics-graph.png -------------------------------------------------------------------------------- /docs/images/prometheus-metrics-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-metrics-menu.png -------------------------------------------------------------------------------- /docs/images/prometheus-nodes-graph1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-nodes-graph1.png -------------------------------------------------------------------------------- /docs/images/prometheus-nodes-graph2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-nodes-graph2.png -------------------------------------------------------------------------------- /docs/images/prometheus-nodes-target.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-nodes-target.png -------------------------------------------------------------------------------- /docs/images/prometheus-nodes-target2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-nodes-target2.png -------------------------------------------------------------------------------- /docs/images/prometheus-operator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-operator.png -------------------------------------------------------------------------------- /docs/images/prometheus-service-endpoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-service-endpoints.png -------------------------------------------------------------------------------- /docs/images/prometheus-service-endpoints2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-service-endpoints2.png -------------------------------------------------------------------------------- /docs/images/prometheus-targets-redis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-targets-redis.png -------------------------------------------------------------------------------- /docs/images/prometheus-webui.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/prometheus-webui.png -------------------------------------------------------------------------------- /docs/images/promethues-alertmanager-email2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promethues-alertmanager-email2.png -------------------------------------------------------------------------------- /docs/images/promethues-nodes-target2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promethues-nodes-target2.png -------------------------------------------------------------------------------- /docs/images/promethues-operator-grafana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promethues-operator-grafana.png -------------------------------------------------------------------------------- /docs/images/promethues-operator-kube-scheduler-error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promethues-operator-kube-scheduler-error.png -------------------------------------------------------------------------------- /docs/images/promethues-operator-kube-scheduler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promethues-operator-kube-scheduler.png -------------------------------------------------------------------------------- /docs/images/promethues-operator-targets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promethues-operator-targets.png -------------------------------------------------------------------------------- /docs/images/promtheus-before-label.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/promtheus-before-label.png -------------------------------------------------------------------------------- /docs/images/redis-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/redis-graph.png -------------------------------------------------------------------------------- /docs/images/redis-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/redis-metrics.png -------------------------------------------------------------------------------- /docs/images/services-iptables-overview.svg: -------------------------------------------------------------------------------- 1 | [SVG text content — "Services iptables overview" diagram: a Client reaches a Service IP whose iptables rules, programmed on the Node by kube-proxy from the apiserver, balance traffic across Backend Pods 1-3, each with labels app=MyApp and port 9376] -------------------------------------------------------------------------------- /docs/images/setup-jenkins-01-unlock.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/setup-jenkins-01-unlock.jpg -------------------------------------------------------------------------------- /docs/images/setup-jenkins-02-plugin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/setup-jenkins-02-plugin.png -------------------------------------------------------------------------------- /docs/images/setup-jenkins-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/setup-jenkins-home.png -------------------------------------------------------------------------------- /docs/images/setup-jenkins-k8s-plugin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/setup-jenkins-k8s-plugin.png -------------------------------------------------------------------------------- /docs/images/traefik-tls-demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/traefik-tls-demo1.png -------------------------------------------------------------------------------- /docs/images/traefik-tls-demo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/traefik-tls-demo2.png -------------------------------------------------------------------------------- /docs/images/traefik-tls-demo3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/traefik-tls-demo3.png -------------------------------------------------------------------------------- /docs/images/traefik-tls.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/traefik-tls.png -------------------------------------------------------------------------------- /docs/images/wordpress-home.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/wordpress-home.jpg -------------------------------------------------------------------------------- /docs/images/wordpress-ui.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/wordpress-ui.jpg --------------------------------------------------------------------------------
/docs/images/wordpress.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/wordpress.jpg -------------------------------------------------------------------------------- /docs/images/ydzs-qrcode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ydzs-qrcode.png -------------------------------------------------------------------------------- /docs/images/ydzs-xcx.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cnych/kubernetes-learning/7949681dbc5403140566c96ab4070254c5676ad6/docs/images/ydzs-xcx.png -------------------------------------------------------------------------------- /docs/index.md: --------------------------------------------------------------------------------

# kubernetes-learning -- From Docker to Kubernetes: An Advanced Course

## Introduction

[Kubernetes](http://kubernetes.io/) is a container orchestration and scheduling engine open-sourced by Google and based on its internal Borg system. As one of the most important projects in the [CNCF](http://cncf.io/) (Cloud Native Computing Foundation), its goal is not merely to be an orchestration system but to provide a specification: you describe your cluster's architecture and define the desired final state of your services, and `Kubernetes` automatically brings the system to that state and keeps it there. As the cornerstone of cloud-native applications, `Kubernetes` amounts to a cloud operating system, and its importance speaks for itself.
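For example, with a manifest as small as the one below you only *declare* the state you want — "three replicas of an nginx Pod" — and `Kubernetes` keeps reconciling the cluster toward it. (A minimal sketch for illustration only; it is not one of this repository's files, and the name `nginx-demo` is made up.)

```yaml
# Minimal illustrative sketch (not part of the course manifests).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-demo
spec:
  replicas: 3            # the desired state: three Pods at all times
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
```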
![From Docker to Kubernetes: An Advanced Course](http://sdn.haimaxy.com/covers/2018/4/21/c4082e0f09c746aa848279a2567cffed.png)

Readers often tell me that they have a decent grasp of `Docker` but don't know how to use `Kubernetes`, and that other online `Kubernetes` courses are too expensive; this book was written exactly for you. And if you don't know `Docker` or `Kubernetes` yet, that's fine too: we start from the `Docker` basics and work step by step toward advanced `Kubernetes` usage, so there is no need to worry.

After finishing this course you will have a much deeper understanding of `Docker` and `Kubernetes`. We will cover:

* Common `Docker` usage, although our focus will be on Kubernetes
* Building a `Kubernetes` cluster with `kubeadm`
* How a `Kubernetes` cluster works internally
* How to use the common controllers
* `Kubernetes` scheduling policies
* Operating and maintaining `Kubernetes`
* The `Helm` package manager
* Finally, implementing CI/CD on top of Kubernetes


GitHub repository: [https://github.com/cnych/kubernetes-learning/tree/master/docs](https://github.com/cnych/kubernetes-learning/)

The companion video course is available at: [https://youdianzhishi.com/course/6n8xd6/](https://youdianzhishi.com/course/6n8xd6/)


## Community & Reader Exchange

* Blog: [Yangming's blog](https://www.qikqiak.com/)
* WeChat group: the `k8s` tech circle (k8s技术圈); scan my ([Yangming](https://www.qikqiak.com/page/about/)) WeChat QR code, or search for the WeChat ID **iEverything** and I will add you to the group; please include a note (k8s or kubernetes)
* Zhihu column: [k8s技术圈](https://zhuanlan.zhihu.com/kube100)
* Toutiao.io: [k8s技术圈](https://toutiao.io/subjects/268333)
* WeChat official account: scan the QR code below to follow the `k8s技术圈` official account

![k8s official account](./images/k8s-qrcode.png)

* Youdian Zhishi: [youdianzhishi.com](https://youdianzhishi.com/) is a comprehensive technology learning platform; the companion video course for this book is published there, and interested readers can scan the QR codes below to follow the courses they care about.

![Youdian Zhishi service account](./images/ydzs-qrcode.png)
![Youdian Zhishi mini program](./images/ydzs-xcx.png)

-------------------------------------------------------------------------------- /efkdemo/elasticsearch-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: es-cluster 5 | namespace: logging 6 | spec: 7 | serviceName: elasticsearch 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: elasticsearch 12 | template: 13 | metadata: 14 | labels: 15 | app: elasticsearch 16 | spec: 17 | containers: 18 | - name: elasticsearch 19 | image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.3 20 | resources: 21 | limits: 22 | cpu: 1000m 23 | requests: 24 | cpu: 100m 25 | ports: 26 | - containerPort: 9200 27 | name: rest 28 | protocol: TCP 29 | - containerPort: 9300 30 | name: inter-node 31 | protocol: TCP 32 | volumeMounts: 33 | - name: data 34 | mountPath: /usr/share/elasticsearch/data 35 | env: 36 | - name: cluster.name 37 | value: k8s-logs 38 | - name: node.name 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.name 42 | - name: discovery.zen.ping.unicast.hosts 43 | value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch" 44 | - name: discovery.zen.minimum_master_nodes 45 | value: "2" 46 | - name: ES_JAVA_OPTS 47 | value: "-Xms512m -Xmx512m" 48 | initContainers: 49 | - name: fix-permissions 50 | image: busybox 51 | command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"] 52 | securityContext: 53 | privileged: true 54 | volumeMounts: 55 | - name: data 56 | mountPath: /usr/share/elasticsearch/data 57 | - name: increase-vm-max-map 58 | image: busybox 59 | command: ["sysctl", "-w", "vm.max_map_count=262144"] 60 | securityContext: 61 | privileged: true 62 | - name: increase-fd-ulimit 63 | image: busybox 64 | command: ["sh", "-c", "ulimit -n 65536"] 65 | securityContext: 66 | privileged: true 67 | volumeClaimTemplates: 68 | - metadata: 69 | name: data 70 | labels: 71 | app: elasticsearch 72 | spec: 73 | accessModes: [ "ReadWriteOnce" ] 74 | storageClassName: es-data-db 75 | resources: 76 | requests: 77 | storage: 50Gi 78 | -------------------------------------------------------------------------------- /efkdemo/elasticsearch-storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: es-data-db 5 | provisioner: fuseim.pri/ifs 6 | -------------------------------------------------------------------------------- /efkdemo/elasticsearch-svc.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: elasticsearch 5 | namespace: logging 6 | labels: 7 | app: elasticsearch 8 | spec: 9 | selector: 10 | app: elasticsearch 11 | clusterIP: None 12 | ports: 13 | - port: 9200 14 | name: rest 15 | - port: 9300 16 | name: inter-node 17 | -------------------------------------------------------------------------------- /efkdemo/fluentd-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: fluentd-config 5 | namespace: logging 6 | labels: 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | data: 9 | system.conf: |- 10 | <system> 11 | root_dir /tmp/fluentd-buffers/ 12 | </system> 13 | containers.input.conf: |- 14 | <source> 15 | @id fluentd-containers.log 16 | @type tail 17 | path /var/log/containers/*.log 18 | pos_file /var/log/es-containers.log.pos 19 | time_format %Y-%m-%dT%H:%M:%S.%NZ 20 | localtime 21 | tag raw.kubernetes.* 22 | format json 23 | read_from_head true 24 | </source> 25 | # Detect exceptions in the log output and forward them as one log entry. 26 | <match raw.kubernetes.**> 27 | @id raw.kubernetes 28 | @type detect_exceptions 29 | remove_tag_prefix raw 30 | message log 31 | stream stream 32 | multiline_flush_interval 5 33 | max_bytes 500000 34 | max_lines 1000 35 | </match> 36 | system.input.conf: |- 37 | # Logs from systemd-journal for interesting services.
38 | <source> 39 | @id journald-docker 40 | @type systemd 41 | filters [{ "_SYSTEMD_UNIT": "docker.service" }] 42 | <storage> 43 | @type local 44 | persistent true 45 | </storage> 46 | read_from_head true 47 | tag docker 48 | </source> 49 | <source> 50 | @id journald-kubelet 51 | @type systemd 52 | filters [{ "_SYSTEMD_UNIT": "kubelet.service" }] 53 | <storage> 54 | @type local 55 | persistent true 56 | </storage> 57 | read_from_head true 58 | tag kubelet 59 | </source> 60 | forward.input.conf: |- 61 | # Takes the messages sent over TCP 62 | <source> 63 | @type forward 64 | </source> 65 | output.conf: |- 66 | # Enriches records with Kubernetes metadata 67 | <filter kubernetes.**> 68 | @type kubernetes_metadata 69 | </filter> 70 | <match **> 71 | @id elasticsearch 72 | @type elasticsearch 73 | @log_level info 74 | include_tag_key true 75 | host elasticsearch 76 | port 9200 77 | logstash_format true 78 | request_timeout 30s 79 | <buffer> 80 | @type file 81 | path /var/log/fluentd-buffers/kubernetes.system.buffer 82 | flush_mode interval 83 | retry_type exponential_backoff 84 | flush_thread_count 2 85 | flush_interval 5s 86 | retry_forever 87 | retry_max_interval 30 88 | chunk_limit_size 2M 89 | queue_limit_length 8 90 | overflow_action block 91 | </buffer> 92 | </match> 93 | -------------------------------------------------------------------------------- /efkdemo/fluentd-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fluentd-es 5 | namespace: logging 6 | labels: 7 | k8s-app: fluentd-es 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | --- 11 | kind: ClusterRole 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | metadata: 14 | name: fluentd-es 15 | labels: 16 | k8s-app: fluentd-es 17 | kubernetes.io/cluster-service: "true" 18 | addonmanager.kubernetes.io/mode: Reconcile 19 | rules: 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - "namespaces" 24 | - "pods" 25 | verbs: 26 | - "get" 27 | - "watch" 28 | - "list" 29 | --- 30 | kind: ClusterRoleBinding 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | metadata: 33 | name: fluentd-es 34 | labels: 35 | k8s-app: fluentd-es 36 | kubernetes.io/cluster-service: "true" 37 | addonmanager.kubernetes.io/mode: Reconcile 38 | subjects: 39 | - kind: ServiceAccount 40 | name: fluentd-es 41 | namespace: logging 42 | apiGroup: "" 43 | roleRef: 44 | kind: ClusterRole 45 | name: fluentd-es 46 | apiGroup: "" 47 | --- 48 | apiVersion: apps/v1 49 | kind: DaemonSet 50 | metadata: 51 | name: fluentd-es 52 | namespace: logging 53 | labels: 54 | k8s-app: fluentd-es 55 | version: v2.0.4 56 | kubernetes.io/cluster-service: "true" 57 | addonmanager.kubernetes.io/mode: Reconcile 58 | spec: 59 | selector: 60 | matchLabels: 61 | k8s-app: fluentd-es 62 | version: v2.0.4 63 | template: 64 | metadata: 65 | labels: 66 | k8s-app: fluentd-es 67 | kubernetes.io/cluster-service: "true" 68 | version: v2.0.4 69 | # This annotation ensures that fluentd does not get evicted if the node 70 | # supports critical pod annotation based priority scheme. 71 | # Note that this does not guarantee admission on the nodes (#40573).
72 | annotations: 73 | scheduler.alpha.kubernetes.io/critical-pod: '' 74 | spec: 75 | priorityClassName: system-node-critical 76 | serviceAccountName: fluentd-es 77 | containers: 78 | - name: fluentd-es 79 | image: cnych/fluentd-elasticsearch:v2.0.4 80 | env: 81 | - name: FLUENTD_ARGS 82 | value: --no-supervisor -q 83 | resources: 84 | limits: 85 | memory: 500Mi 86 | requests: 87 | cpu: 100m 88 | memory: 200Mi 89 | volumeMounts: 90 | - name: varlog 91 | mountPath: /var/log 92 | - name: varlibdockercontainers 93 | mountPath: /data/docker/containers 94 | readOnly: true 95 | - name: config-volume 96 | mountPath: /etc/fluent/config.d 97 | nodeSelector: 98 | beta.kubernetes.io/fluentd-ds-ready: "true" 99 | tolerations: 100 | - key: node-role.kubernetes.io/master 101 | operator: Exists 102 | effect: NoSchedule 103 | terminationGracePeriodSeconds: 30 104 | volumes: 105 | - name: varlog 106 | hostPath: 107 | path: /var/log 108 | - name: varlibdockercontainers 109 | hostPath: 110 | path: /data/docker/containers 111 | - name: config-volume 112 | configMap: 113 | name: fluentd-config 114 | -------------------------------------------------------------------------------- /efkdemo/kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana 5 | namespace: logging 6 | labels: 7 | app: kibana 8 | spec: 9 | ports: 10 | - port: 5601 11 | type: NodePort 12 | selector: 13 | app: kibana 14 | --- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: kibana 19 | namespace: logging 20 | labels: 21 | app: kibana 22 | spec: 23 | selector: 24 | matchLabels: 25 | app: kibana 26 | template: 27 | metadata: 28 | labels: 29 | app: kibana 30 | spec: 31 | nodeSelector: 32 | kubernetes.io/hostname: node03 33 | containers: 34 | - name: kibana 35 | image: docker.elastic.co/kibana/kibana-oss:6.4.3 36 | resources: 37 | limits: 38 | cpu: 1000m 39 | requests: 40 | cpu: 100m 41 | env: 42 | - name: ELASTICSEARCH_URL 43 | value: http://elasticsearch:9200 44 | ports: 45 | - containerPort: 5601 -------------------------------------------------------------------------------- /efkdemo/kube-logging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: logging 5 | -------------------------------------------------------------------------------- /elastic-single/elastic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: elasticsearch 5 | spec: 6 | selector: 7 | matchLabels: 8 | component: elasticsearch 9 | template: 10 | metadata: 11 | labels: 12 | component: elasticsearch 13 | spec: 14 | containers: 15 | - name: elasticsearch 16 | image: docker.elastic.co/elasticsearch/elasticsearch:6.2.1 17 | env: 18 | - name: discovery.type 19 | value: single-node 20 | ports: 21 | - containerPort: 9200 22 | name: http 23 | protocol: TCP 24 | 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: elasticsearch 30 | labels: 31 | component: elasticsearch 32 | spec: 33 | type: NodePort 34 | selector: 35 | component: elasticsearch 36 | ports: 37 | - name: http 38 | port: 9200 39 | protocol: TCP -------------------------------------------------------------------------------- /elastic-single/kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: 
Deployment 3 | metadata: 4 | name: kibana 5 | spec: 6 | selector: 7 | matchLabels: 8 | run: kibana 9 | template: 10 | metadata: 11 | labels: 12 | run: kibana 13 | spec: 14 | containers: 15 | - name: kibana 16 | image: docker.elastic.co/kibana/kibana:6.2.1 17 | env: 18 | - name: ELASTICSEARCH_URL 19 | value: http://elasticsearch:9200 20 | - name: XPACK_SECURITY_ENABLED 21 | value: "true" 22 | ports: 23 | - containerPort: 5601 24 | name: http 25 | protocol: TCP 26 | 27 | --- 28 | apiVersion: v1 29 | kind: Service 30 | metadata: 31 | name: kibana 32 | labels: 33 | run: kibana 34 | spec: 35 | type: NodePort 36 | selector: 37 | run: kibana 38 | ports: 39 | - name: http 40 | port: 5601 41 | protocol: TCP -------------------------------------------------------------------------------- /grafana/grafana-chown-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: grafana-chown 5 | namespace: kube-ops 6 | spec: 7 | template: 8 | spec: 9 | restartPolicy: Never 10 | containers: 11 | - name: grafana-chown 12 | command: ["chown", "-R", "472:472", "/var/lib/grafana"] 13 | image: busybox 14 | imagePullPolicy: IfNotPresent 15 | volumeMounts: 16 | - name: storage 17 | subPath: grafana 18 | mountPath: /var/lib/grafana 19 | volumes: 20 | - name: storage 21 | persistentVolumeClaim: 22 | claimName: grafana -------------------------------------------------------------------------------- /grafana/grafana-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-config 5 | namespace: kube-ops 6 | data: 7 | grafana.ini: | 8 | [smtp] 9 | enabled = true 10 | host = smtp.163.com:25 11 | user = ych_1024@163.com 12 | password = 13 | skip_verify = true 14 | from_address = ych_1024@163.com 15 | [alerting] 16 | enabled = true 17 | execute_alerts = true 18 | -------------------------------------------------------------------------------- /grafana/grafana-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: grafana 5 | namespace: kube-ops 6 | labels: 7 | app: grafana 8 | spec: 9 | revisionHistoryLimit: 10 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | template: 14 | metadata: 15 | labels: 16 | app: grafana 17 | spec: 18 | containers: 19 | - name: grafana 20 | image: grafana/grafana:5.3.4 21 | imagePullPolicy: IfNotPresent 22 | ports: 23 | - containerPort: 3000 24 | name: grafana 25 | env: 26 | - name: GF_SECURITY_ADMIN_USER 27 | value: admin 28 | - name: GF_SECURITY_ADMIN_PASSWORD 29 | value: admin321 30 | readinessProbe: 31 | failureThreshold: 10 32 | httpGet: 33 | path: /api/health 34 | port: 3000 35 | scheme: HTTP 36 | initialDelaySeconds: 60 37 | periodSeconds: 10 38 | successThreshold: 1 39 | timeoutSeconds: 30 40 | livenessProbe: 41 | failureThreshold: 3 42 | httpGet: 43 | path: /api/health 44 | port: 3000 45 | scheme: HTTP 46 | periodSeconds: 10 47 | successThreshold: 1 48 | timeoutSeconds: 1 49 | resources: 50 | limits: 51 | cpu: 100m 52 | memory: 256Mi 53 | requests: 54 | cpu: 100m 55 | memory: 256Mi 56 | volumeMounts: 57 | - mountPath: /var/lib/grafana 58 | subPath: grafana 59 | name: storage 60 | - mountPath: "/etc/grafana" 61 | name: config 62 | securityContext: 63 | fsGroup: 472 64 | runAsUser: 472 65 | volumes: 66 | - name: storage 67 | persistentVolumeClaim: 68 | claimName: grafana 69 | - name: 
config 70 | configMap: 71 | name: grafana-config -------------------------------------------------------------------------------- /grafana/grafana-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: kube-ops 6 | labels: 7 | app: grafana 8 | spec: 9 | type: NodePort 10 | ports: 11 | - port: 3000 12 | selector: 13 | app: grafana 14 | -------------------------------------------------------------------------------- /grafana/grafana-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: grafana 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | server: 10.151.30.57 13 | path: /data/k8s 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: grafana 19 | namespace: kube-ops 20 | spec: 21 | accessModes: 22 | - ReadWriteOnce 23 | resources: 24 | requests: 25 | storage: 1Gi -------------------------------------------------------------------------------- /hook/poststart-hook.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: hook-demo1 6 | labels: 7 | app: hook 8 | spec: 9 | containers: 10 | - name: hook-demo1 11 | image: nginx 12 | ports: 13 | - name: webport 14 | containerPort: 80 15 | lifecycle: 16 | postStart: 17 | exec: 18 | command: ["/bin/sh", "-c", "echo Hello from the postStart Handler > /usr/share/message"] 19 | -------------------------------------------------------------------------------- /hook/prestop-hook.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: hook-demo2 6 | labels: 7 | app: hook 8 | spec: 9 | containers: 10 | - name: hook-demo2 11 | image: nginx 12 | ports: 13 | - name: webport 14 | containerPort: 80 15 | volumeMounts: 16 | - name: message 17 | mountPath: /usr/share 18 | lifecycle: 19 | preStop: 20 | exec: 21 | command: ["/bin/sh", "-c", "echo Hello from the preStop Handler > /usr/share/message"] 22 | volumes: 23 | - name: message 24 | hostPath: 25 | path: /tmp 26 | -------------------------------------------------------------------------------- /hpademo/hpa-demo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: hpa-demo 6 | labels: 7 | app: hpa 8 | spec: 9 | revisionHistoryLimit: 15 10 | minReadySeconds: 5 11 | strategy: 12 | type: RollingUpdate 13 | rollingUpdate: 14 | maxSurge: 1 15 | maxUnavailable: 1 16 | template: 17 | metadata: 18 | labels: 19 | app: nginx 20 | spec: 21 | containers: 22 | - name: nginx 23 | image: nginx 24 | resources: 25 | requests: 26 | cpu: 100m 27 | ports: 28 | - containerPort: 80 29 | 30 | --- 31 | apiVersion: autoscaling/v1 32 | kind: HorizontalPodAutoscaler 33 | metadata: 34 | name: hpa-demo 35 | namespace: default 36 | spec: 37 | maxReplicas: 10 38 | minReplicas: 1 39 | scaleTargetRef: 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | name: hpa-demo 43 | targetCPUUtilizationPercentage: 5 44 | -------------------------------------------------------------------------------- /initcontainer/initconfig.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: init-demo 6 | labels: 7 | app: init 8 | spec: 9 | initContainers: 10 | - name: install 11 | image: busybox 12 | command: 13 | - wget 14 | - "-O" 15 | - "/work-dir/index.html" 16 | - http://www.baidu.com 17 | volumeMounts: 18 | - name: workdir 19 | mountPath: /work-dir 20 | containers: 21 | - name: nginx 22 | image: nginx 23 | ports: 24 | - containerPort: 80 25 | volumeMounts: 26 | - name: workdir 27 | mountPath: /usr/share/nginx/html 28 | volumes: 29 | - name: workdir 30 | emptyDir: {} 31 | -------------------------------------------------------------------------------- /initcontainer/initpod1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: init-pod 6 | labels: 7 | app: init 8 | spec: 9 | initContainers: 10 | - name: init-myservice 11 | image: busybox 12 | command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] 13 | - name: init-mydb 14 | image: busybox 15 | command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;'] 16 | containers: 17 | - name: main-container 18 | image: busybox 19 | command: ['sh', '-c', 'echo The app is running! && sleep 3600'] 20 | -------------------------------------------------------------------------------- /initcontainer/initservice.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: myservice 6 | spec: 7 | ports: 8 | - protocol: TCP 9 | port: 80 10 | targetPort: 6379 11 | 12 | --- 13 | apiVersion: v1 14 | kind: Service 15 | metadata: 16 | name: mydb 17 | spec: 18 | ports: 19 | - protocol: TCP 20 | port: 80 21 | targetPort: 6378 22 | -------------------------------------------------------------------------------- /jenkins/jenkins-slave.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | ENV JAVA_HOME=/usr/local/newhope/java1.8 \ 4 | PATH=/usr/local/newhope/java1.8/bin:$PATH \ 5 | TIMEZONE=Asia/Shanghai \ 6 | LANG=zh_CN.UTF-8 7 | 8 | RUN echo "${TIMEZONE}" > /etc/timezone \ 9 | && echo "$LANG UTF-8" > /etc/locale.gen \ 10 | && apt-get update -q \ 11 | && ln -sf /usr/share/zoneinfo/${TIMEZONE} /etc/localtime \ 12 | && mkdir -p /usr/local/newhope/java1.8 \ 13 | && mkdir -p /home/jenkins/.jenkins \ 14 | && mkdir -p /home/jenkins/agent \ 15 | && mkdir -p /usr/share/jenkins \ 16 | && mkdir -p /root/.kube 17 | 18 | COPY java1.8 /usr/local/newhope/java1.8 19 | COPY kubectl /usr/local/bin/kubectl 20 | COPY jenkins-slave /usr/local/bin/jenkins-slave 21 | COPY slave.jar /usr/share/jenkins 22 | 23 | # java / locale / Docker-in-Docker / svn / jnlp 24 | RUN mkdir /usr/java/jdk1.8.0_121/bin -p \ 25 | && ln -s /usr/local/newhope/java1.8 /usr/java/jdk1.8.0_121 \ 26 | && DEBIAN_FRONTEND=noninteractive apt-get install -yq curl apt-utils dialog locales apt-transport-https build-essential bzip2 ca-certificates sudo jq unzip zip gnupg2 software-properties-common \ 27 | && update-locale LANG=$LANG \ 28 | && locale-gen $LANG \ 29 | && DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales \ 30 | && curl -fsSL https://download.docker.com/linux/$(. /etc/os-release; echo "$ID")/gpg | apt-key add - \ 31 | && add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/$(. 
/etc/os-release; echo "$ID") $(lsb_release -cs) stable" \ 32 | && apt-get update -y \ 33 | && apt-get install -y docker-ce=17.09.1~ce-0~debian \ 34 | && sudo apt-get install -y subversion \ 35 | && groupadd -g 10000 jenkins \ 36 | && useradd -c "Jenkins user" -d $HOME -u 10000 -g 10000 -m jenkins \ 37 | && usermod -a -G docker jenkins \ 38 | && sed -i '/^root/a\jenkins ALL=(ALL:ALL) NOPASSWD:ALL' /etc/sudoers 39 | 40 | USER root 41 | 42 | WORKDIR /home/jenkins 43 | 44 | ENTRYPOINT ["jenkins-slave"] 45 | -------------------------------------------------------------------------------- /jenkins/jenkins.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: jenkins 6 | namespace: kube-ops 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | app: jenkins 12 | spec: 13 | terminationGracePeriodSeconds: 10 14 | serviceAccountName: jenkins 15 | containers: 16 | - name: jenkins 17 | image: jenkins/jenkins:lts 18 | imagePullPolicy: IfNotPresent 19 | ports: 20 | - containerPort: 8080 21 | name: web 22 | protocol: TCP 23 | - containerPort: 50000 24 | name: agent 25 | protocol: TCP 26 | resources: 27 | limits: 28 | cpu: 1000m 29 | memory: 1Gi 30 | requests: 31 | cpu: 500m 32 | memory: 512Mi 33 | livenessProbe: 34 | httpGet: 35 | path: /login 36 | port: 8080 37 | initialDelaySeconds: 60 38 | timeoutSeconds: 5 39 | failureThreshold: 12 # ~2 minutes 40 | readinessProbe: 41 | httpGet: 42 | path: /login 43 | port: 8080 44 | initialDelaySeconds: 60 45 | timeoutSeconds: 5 46 | failureThreshold: 12 # ~2 minutes 47 | volumeMounts: 48 | - name: jenkinshome 49 | subPath: jenkins 50 | mountPath: /var/jenkins_home 51 | env: 52 | - name: LIMITS_MEMORY 53 | valueFrom: 54 | resourceFieldRef: 55 | resource: limits.memory 56 | divisor: 1Mi 57 | - name: JAVA_OPTS 58 | value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai 59 | securityContext: 60 | fsGroup: 1000 61 | volumes: 62 | - name: jenkinshome 63 | persistentVolumeClaim: 64 | claimName: opspvc 65 | -------------------------------------------------------------------------------- /jenkins/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: jenkins 5 | namespace: kube-ops 6 | 7 | --- 8 | 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1beta1 11 | metadata: 12 | name: jenkins 13 | rules: 14 | - apiGroups: ["extensions", "apps"] 15 | resources: ["deployments"] 16 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"] 17 | - apiGroups: [""] 18 | resources: ["services"] 19 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"] 20 | - apiGroups: [""] 21 | resources: ["pods"] 22 | verbs: ["create","delete","get","list","patch","update","watch"] 23 | - apiGroups: [""] 24 | resources: ["pods/exec"] 25 | verbs: ["create","delete","get","list","patch","update","watch"] 26 | - apiGroups: [""] 27 | resources: ["pods/log"] 28 | verbs: ["get","list","watch"] 29 | - apiGroups: [""] 30 | resources: ["secrets"] 31 | verbs: ["get"] 32 | 33 | --- 34 | apiVersion: rbac.authorization.k8s.io/v1beta1 35 | kind: ClusterRoleBinding 36 | metadata: 37 | name: jenkins 38 | roleRef: 39 | apiGroup: rbac.authorization.k8s.io 40 | kind: ClusterRole 41 | name: jenkins 
42 | subjects: 43 | - kind: ServiceAccount 44 | name: jenkins 45 | namespace: kube-ops -------------------------------------------------------------------------------- /jenkins/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: jenkins 5 | namespace: kube-ops 6 | labels: 7 | app: jenkins 8 | spec: 9 | selector: 10 | app: jenkins 11 | type: NodePort 12 | ports: 13 | - name: web 14 | port: 8080 15 | targetPort: web 16 | nodePort: 30001 17 | - name: agent 18 | port: 50000 19 | targetPort: agent -------------------------------------------------------------------------------- /jenkins/volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: opspv 5 | spec: 6 | capacity: 7 | storage: 20Gi 8 | accessModes: 9 | - ReadWriteMany 10 | persistentVolumeReclaimPolicy: Delete 11 | nfs: 12 | server: 10.151.30.57 13 | path: /data/k8s 14 | 15 | --- 16 | kind: PersistentVolumeClaim 17 | apiVersion: v1 18 | metadata: 19 | name: opspvc 20 | namespace: kube-ops 21 | spec: 22 | accessModes: 23 | - ReadWriteMany 24 | resources: 25 | requests: 26 | storage: 20Gi -------------------------------------------------------------------------------- /jobdemo/cronjob-demo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1beta1 3 | kind: CronJob 4 | metadata: 5 | name: cronjob-demo 6 | spec: 7 | successfulJobsHistoryLimit: 1 8 | failedJobsHistoryLimit: 10 9 | schedule: "*/1 * * * *" 10 | jobTemplate: 11 | spec: 12 | template: 13 | metadata: 14 | name: cronjob-demo 15 | spec: 16 | restartPolicy: OnFailure 17 | containers: 18 | - name: counter 19 | image: busybox 20 | command: 21 | - "/bin/sh" 22 | - "-c" 23 | - "for i in 9 8 7 6 5 4 3 2 1; do echo $i; done" 24 | -------------------------------------------------------------------------------- /jobdemo/job-demo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: job-demo 6 | spec: 7 | template: 8 | metadata: 9 | name: job-demo 10 | spec: 11 | restartPolicy: Never 12 | containers: 13 | - name: counter 14 | image: busybox 15 | command: 16 | - "/bin/sh" 17 | - "-c" 18 | - "for i in 9 8 7 6 5 4 3 2 1; do echo $i; done" -------------------------------------------------------------------------------- /livenessprobe/liveness-exec.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: liveness-exec 6 | labels: 7 | app: liveness 8 | spec: 9 | containers: 10 | - name: liveness 11 | image: busybox 12 | args: 13 | - /bin/sh 14 | - -c 15 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 16 | livenessProbe: 17 | exec: 18 | command: 19 | - cat 20 | - /tmp/healthy 21 | initialDelaySeconds: 5 22 | periodSeconds: 5 -------------------------------------------------------------------------------- /livenessprobe/liveness-http.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: liveness-http 6 | labels: 7 | app: liveness 8 | spec: 9 | containers: 10 | - name: liveness 11 | image: cnych/liveness 12 | args: 13 | - /server 14 | livenessProbe: 15 | httpGet: 16 | path: /healthz 17 | port: 8080 18 | initialDelaySeconds: 3 
19 | periodSeconds: 3 20 | -------------------------------------------------------------------------------- /livenessprobe/liveness-readness.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: liveness-readiness 6 | labels: 7 | app: liveness-readiness 8 | spec: 9 | containers: 10 | - name: liveness-readiness 11 | image: cnych/liveness 12 | args: 13 | - /server 14 | livenessProbe: 15 | httpGet: 16 | path: /healthz 17 | port: 8080 18 | initialDelaySeconds: 5 19 | periodSeconds: 5 20 | readinessProbe: 21 | tcpSocket: 22 | port: 8080 23 | initialDelaySeconds: 3 24 | periodSeconds: 3 25 | -------------------------------------------------------------------------------- /logdemo/two-files-counter-pod-streaming-sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: counter 5 | spec: 6 | containers: 7 | - name: count 8 | image: busybox 9 | args: 10 | - /bin/sh 11 | - -c 12 | - > 13 | i=0; 14 | while true; 15 | do 16 | echo "$i: $(date)" >> /var/log/1.log; 17 | echo "$(date) INFO $i" >> /var/log/2.log; 18 | i=$((i+1)); 19 | sleep 1; 20 | done 21 | volumeMounts: 22 | - name: varlog 23 | mountPath: /var/log 24 | - name: count-log-1 25 | image: busybox 26 | args: [/bin/sh, -c, 'tail -n+1 -f /var/log/1.log'] 27 | volumeMounts: 28 | - name: varlog 29 | mountPath: /var/log 30 | - name: count-log-2 31 | image: busybox 32 | args: [/bin/sh, -c, 'tail -n+1 -f /var/log/2.log'] 33 | volumeMounts: 34 | - name: varlog 35 | mountPath: /var/log 36 | volumes: 37 | - name: varlog 38 | emptyDir: {} -------------------------------------------------------------------------------- /mychart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: mychart 5 | version: 0.1.0 -------------------------------------------------------------------------------- /mychart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing {{ .Chart.Name }}. 2 | 3 | Your release is named {{ .Release.Name }}. 4 | 5 | To learn more about the release, try: 6 | 7 | $ helm status {{ .Release.Name }} 8 | $ helm get {{ .Release.Name }} 9 | -------------------------------------------------------------------------------- /mychart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* Generate the base labels */}} 2 | {{- define "mychart.labels" }} 3 | from: helm 4 | date: {{ now | htmlDate }} 5 | chart: {{ .Chart.Name }} 6 | version: {{ .Chart.Version }} 7 | {{- end }} -------------------------------------------------------------------------------- /mychart/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Release.Name }}-configmap 5 | labels: 6 | {{- include "mychart.labels" . 
| indent 4}} 7 | data: 8 | app: mychart 9 | myvalue: {{ .Values.hello | default "Hello World" | quote }} 10 | {{- $releaseName := .Release.Name }} 11 | {{- with .Values.course }} 12 | k8s: {{ .k8s | upper | quote }} 13 | python: {{ .python | repeat 5 | quote }} 14 | release: {{ $releaseName }} 15 | {{- if eq .python "django" }} 16 | web: "true" 17 | {{- end }} 18 | {{- end }} 19 | courselist: | 20 | {{- range $index, $course := .Values.courselist }} 21 | {{ $course | title | quote }} 22 | {{- end }} 23 | {{- range $key, $val := .Values.course }} 24 | {{ $key }}: {{ $val | upper | quote }} 25 | {{- end }} 26 | {{- include "mychart.labels" . | indent 2 }} -------------------------------------------------------------------------------- /mychart/values.yaml: -------------------------------------------------------------------------------- 1 | course: 2 | k8s: devops 3 | python: django 4 | courselist: 5 | - k8s 6 | - python 7 | - search 8 | - golang -------------------------------------------------------------------------------- /mynginx/.dockerignore: -------------------------------------------------------------------------------- 1 | add.json -------------------------------------------------------------------------------- /mynginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | 3 | # RUN echo '<h1>Hello, World 3!</h1>' > /usr/share/nginx/html/index.html 4 | 5 | WORKDIR /usr/share/nginx/html 6 | 7 | COPY index.html index.html 8 | 9 | -------------------------------------------------------------------------------- /mynginx/add.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | } -------------------------------------------------------------------------------- /mynginx/index.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html lang="en"> 3 | <head> 4 | <meta charset="UTF-8"> 5 | <meta name="viewport" content="width=device-width, initial-scale=1.0"> 6 | <meta http-equiv="X-UA-Compatible" content="ie=edge"> 7 | <title>Document</title> 8 | </head> 9 | <body> 10 | <h1>Hello Docker~~~</h1> 11 | 12 | <h1>Hello Docker~~~</h1> 13 | 14 | <h1>Hello Docker~~~</h1> 15 | </body> 16 | </html> -------------------------------------------------------------------------------- /mynginx/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "test": 100 3 | } -------------------------------------------------------------------------------- /myproject/context/.dockerignore: -------------------------------------------------------------------------------- 1 | world.txt -------------------------------------------------------------------------------- /myproject/context/hello: -------------------------------------------------------------------------------- 1 | Hello Docker123 -------------------------------------------------------------------------------- /myproject/context/test/test.txt: -------------------------------------------------------------------------------- 1 | test 2 | -------------------------------------------------------------------------------- /myproject/context/world.txt: -------------------------------------------------------------------------------- 1 | world 2 | -------------------------------------------------------------------------------- /myproject/dockerfiles/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox 2 | 3 | WORKDIR / 4 | 5 | COPY /hello / 6 | 7 | RUN cat /hello 8 | 9 | RUN echo /test.txt 10 | -------------------------------------------------------------------------------- /nodedemo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:slim 2 | 3 | RUN mkdir /app 4 | WORKDIR /app 5 | 6 | ONBUILD COPY ./package.json /app 7 | ONBUILD RUN npm install 8 | ONBUILD COPY . /app 9 | 10 | CMD ["npm", "start"] -------------------------------------------------------------------------------- /nodedemo/index.js: -------------------------------------------------------------------------------- 1 | console.log('Hello Docker') 2 | -------------------------------------------------------------------------------- /nodedemo/mynode/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM my-node 2 | -------------------------------------------------------------------------------- /nodedemo/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nodedemo", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC" 11 | } 12 | -------------------------------------------------------------------------------- /nodedemo2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:slim 2 | 3 | RUN mkdir /app 4 | WORKDIR /app 5 | 6 | COPY ./package.json /app 7 | RUN npm install 8 | 9 | COPY . 
/app 10 | CMD ["npm", "start"] -------------------------------------------------------------------------------- /nodedemo2/index.js: -------------------------------------------------------------------------------- 1 | console.log('Hello Docker') 2 | -------------------------------------------------------------------------------- /nodedemo2/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nodedemo", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC" 11 | } 12 | -------------------------------------------------------------------------------- /prome/dingtalk-hook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: dingtalk-hook 5 | namespace: kube-ops 6 | spec: 7 | template: 8 | metadata: 9 | labels: 10 | app: dingtalk-hook 11 | spec: 12 | containers: 13 | - name: dingtalk-hook 14 | image: cnych/alertmanager-dingtalk-hook:v0.2 15 | imagePullPolicy: IfNotPresent 16 | ports: 17 | - containerPort: 5000 18 | name: http 19 | env: 20 | - name: ROBOT_TOKEN 21 | valueFrom: 22 | secretKeyRef: 23 | name: dingtalk-secret 24 | key: token 25 | resources: 26 | requests: 27 | cpu: 50m 28 | memory: 100Mi 29 | limits: 30 | cpu: 50m 31 | memory: 100Mi 32 | 33 | --- 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | name: dingtalk-hook 38 | namespace: kube-ops 39 | spec: 40 | selector: 41 | app: dingtalk-hook 42 | ports: 43 | - name: hook 44 | port: 5000 45 | targetPort: http 46 | 47 | -------------------------------------------------------------------------------- /prome/prome-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus-config 5 | namespace: kube-ops 6 | data: 7 | prometheus.yml: | 8 | global: 9 | scrape_interval: 15s 10 | evaluation_interval: 15s 11 | scrape_configs: 12 | - job_name: prometheus 13 | static_configs: 14 | - targets: ['localhost:9090'] 15 | - job_name: traefik 16 | static_configs: 17 | - targets: ['traefik-ingress-service.kube-system.svc.cluster.local:8080'] 18 | - job_name: redis 19 | static_configs: 20 | - targets: ['redis:9121'] 21 | - job_name: kubernetes-node-exporter 22 | kubernetes_sd_configs: 23 | - role: node 24 | relabel_configs: 25 | - source_labels: [__address__] 26 | regex: '(.*):10250' 27 | replacement: '${1}:9100' 28 | target_label: __address__ 29 | action: replace 30 | - action: labelmap 31 | regex: __meta_kubernetes_node_label_(.+) 32 | - job_name: kubernetes-kubelet 33 | kubernetes_sd_configs: 34 | - role: node 35 | relabel_configs: 36 | - source_labels: [__address__] 37 | regex: '(.*):10250' 38 | replacement: '${1}:10255' 39 | target_label: __address__ 40 | action: replace 41 | - action: labelmap 42 | regex: __meta_kubernetes_node_label_(.+) 43 | - job_name: 'kubernetes-cadvisor' 44 | kubernetes_sd_configs: 45 | - role: node 46 | scheme: https 47 | tls_config: 48 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 49 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 50 | relabel_configs: 51 | - action: labelmap 52 | regex: __meta_kubernetes_node_label_(.+) 53 | - target_label: __address__ 54 | replacement: kubernetes.default.svc:443 55 | - source_labels: [__meta_kubernetes_node_name] 56 | regex: (.+) 
57 | target_label: __metrics_path__ 58 | replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor 59 | - job_name: 'kubernetes-apiservers' 60 | kubernetes_sd_configs: 61 | - role: endpoints 62 | scheme: https 63 | tls_config: 64 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 65 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 66 | relabel_configs: 67 | - action: keep 68 | source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] 69 | regex: default;kubernetes;https 70 | - job_name: 'kubernetes-service-endpoints' 71 | kubernetes_sd_configs: 72 | - role: endpoints 73 | relabel_configs: 74 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] 75 | action: keep 76 | regex: true 77 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] 78 | action: replace 79 | target_label: __scheme__ 80 | regex: (https?) 81 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] 82 | action: replace 83 | target_label: __metrics_path__ 84 | regex: (.+) 85 | - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] 86 | action: replace 87 | target_label: __address__ 88 | regex: ([^:]+)(?::\d+)?;(\d+) 89 | replacement: $1:$2 90 | - action: labelmap 91 | regex: __meta_kubernetes_service_label_(.+) 92 | - source_labels: [__meta_kubernetes_namespace] 93 | action: replace 94 | target_label: kubernetes_namespace 95 | - source_labels: [__meta_kubernetes_service_name] 96 | action: replace 97 | target_label: kubernetes_name 98 | -------------------------------------------------------------------------------- /prome/prome-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: kube-ops 6 | labels: 7 | app: prometheus 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | app: prometheus 13 | spec: 14 | securityContext: 15 | runAsUser: 0 16 | serviceAccountName: prometheus 17 | containers: 18 | - name: prometheus 19 | image: prom/prometheus:v2.4.3 20 | imagePullPolicy: IfNotPresent 21 | args: 22 | - "--config.file=/etc/prometheus/prometheus.yml" 23 | - "--storage.tsdb.path=/prometheus" 24 | - "--storage.tsdb.retention=24h" 25 | - "--web.enable-admin-api" 26 | - "--web.enable-lifecycle" 27 | ports: 28 | - containerPort: 9090 29 | name: http 30 | volumeMounts: 31 | - mountPath: "/prometheus" 32 | subPath: prometheus 33 | name: data 34 | - mountPath: "/etc/prometheus" 35 | name: config 36 | resources: 37 | requests: 38 | cpu: 100m 39 | memory: 512Mi 40 | limits: 41 | cpu: 100m 42 | memory: 512Mi 43 | - name: alertmanager 44 | image: prom/alertmanager:v0.15.3 45 | imagePullPolicy: IfNotPresent 46 | args: 47 | - "--config.file=/etc/alertmanager/config.yml" 48 | - "--storage.path=/alertmanager/data" 49 | ports: 50 | - containerPort: 9093 51 | name: http 52 | volumeMounts: 53 | - mountPath: "/etc/alertmanager" 54 | name: alertcfg 55 | resources: 56 | requests: 57 | cpu: 100m 58 | memory: 256Mi 59 | limits: 60 | cpu: 100m 61 | memory: 256Mi 62 | volumes: 63 | - name: data 64 | persistentVolumeClaim: 65 | claimName: prometheus 66 | - name: config 67 | configMap: 68 | name: prometheus-config 69 | - name: alertcfg 70 | configMap: 71 | name: alert-config 72 | 73 | -------------------------------------------------------------------------------- /prome/prome-node-exporter.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: node-exporter 5 | namespace: kube-ops 6 | labels: 7 | name: node-exporter 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | name: node-exporter 13 | spec: 14 | hostPID: true 15 | hostIPC: true 16 | hostNetwork: true 17 | containers: 18 | - name: node-exporter 19 | image: prom/node-exporter:v0.16.0 20 | ports: 21 | - containerPort: 9100 22 | resources: 23 | requests: 24 | cpu: 0.15 25 | securityContext: 26 | privileged: true 27 | args: 28 | - --path.procfs 29 | - /host/proc 30 | - --path.sysfs 31 | - /host/sys 32 | - --collector.filesystem.ignored-mount-points 33 | - '"^/(sys|proc|dev|host|etc)($|/)"' 34 | volumeMounts: 35 | - name: dev 36 | mountPath: /host/dev 37 | - name: proc 38 | mountPath: /host/proc 39 | - name: sys 40 | mountPath: /host/sys 41 | - name: rootfs 42 | mountPath: /rootfs 43 | tolerations: 44 | - key: "node-role.kubernetes.io/master" 45 | operator: "Exists" 46 | effect: "NoSchedule" 47 | volumes: 48 | - name: proc 49 | hostPath: 50 | path: /proc 51 | - name: dev 52 | hostPath: 53 | path: /dev 54 | - name: sys 55 | hostPath: 56 | path: /sys 57 | - name: rootfs 58 | hostPath: 59 | path: / 60 | -------------------------------------------------------------------------------- /prome/prome-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: kube-ops 6 | 7 | 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRole 11 | metadata: 12 | name: prometheus 13 | rules: 14 | - apiGroups: [""] 15 | resources: 16 | - nodes 17 | - services 18 | - endpoints 19 | - pods 20 | - nodes/proxy 21 | - configmaps 22 | verbs: ["get", "list", "watch"] 23 | - nonResourceURLs: ["/metrics"] 24 | verbs: ["get"] 25 | 26 | 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1beta1 29 | kind: ClusterRoleBinding 30 | metadata: 31 | name: prometheus 32 | roleRef: 33 | apiGroup: rbac.authorization.k8s.io 34 | kind: ClusterRole 35 | name: prometheus 36 | subjects: 37 | - kind: ServiceAccount 38 | name: prometheus 39 | namespace: kube-ops 40 | -------------------------------------------------------------------------------- /prome/prome-redis-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: redis 5 | namespace: kube-ops 6 | spec: 7 | template: 8 | metadata: 9 | labels: 10 | app: redis 11 | spec: 12 | containers: 13 | - name: redis 14 | image: redis:4 15 | resources: 16 | requests: 17 | cpu: 100m 18 | memory: 100Mi 19 | ports: 20 | - containerPort: 6379 21 | - name: redis-exporter 22 | image: oliver006/redis_exporter:latest 23 | resources: 24 | requests: 25 | cpu: 100m 26 | memory: 100Mi 27 | ports: 28 | - containerPort: 9121 29 | --- 30 | kind: Service 31 | apiVersion: v1 32 | metadata: 33 | name: redis 34 | namespace: kube-ops 35 | annotations: 36 | prometheus.io/scrape: "true" 37 | prometheus.io/port: "9121" 38 | spec: 39 | selector: 40 | app: redis 41 | ports: 42 | - name: redis 43 | port: 6379 44 | targetPort: 6379 45 | - name: prom 46 | port: 9121 47 | targetPort: 9121 48 | -------------------------------------------------------------------------------- /prome/prome-svc.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: kube-ops 6 | labels: 7 | app: prometheus 8 | spec: 9 | selector: 10 | app: prometheus 11 | type: NodePort 12 | ports: 13 | - name: web 14 | port: 9090 15 | targetPort: 9090 16 | - name: alert 17 | port: 9093 18 | targetPort: 9093 19 | -------------------------------------------------------------------------------- /prome/prome-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: prometheus 5 | spec: 6 | capacity: 7 | storage: 10Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | server: 10.151.30.57 13 | path: /data/k8s 14 | 15 | --- 16 | apiVersion: v1 17 | kind: PersistentVolumeClaim 18 | metadata: 19 | name: prometheus 20 | namespace: kube-ops 21 | spec: 22 | accessModes: 23 | - ReadWriteOnce 24 | resources: 25 | requests: 26 | storage: 10Gi 27 | -------------------------------------------------------------------------------- /prometheus/node-exporter.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: DaemonSet 4 | metadata: 5 | name: node-exporter 6 | namespace: kube-ops 7 | labels: 8 | app: node-exporter 9 | spec: 10 | template: 11 | metadata: 12 | labels: 13 | app: node-exporter 14 | spec: 15 | containers: 16 | - image: prom/node-exporter:v0.16.0 17 | name: node-exporter 18 | ports: 19 | - containerPort: 9100 20 | protocol: TCP 21 | name: http 22 | tolerations: 23 | - key: "node-role.kubernetes.io/master" 24 | operator: "Exists" 25 | effect: "NoSchedule" 26 | 27 | --- 28 | apiVersion: v1 29 | kind: Service 30 | metadata: 31 | labels: 32 | app: node-exporter 33 | name: node-exporter 34 | namespace: kube-ops 35 | spec: 36 | ports: 37 | - name: http 38 | port: 9100 39 | nodePort: 31009 40 | protocol: TCP 41 | type: NodePort 42 | selector: 43 | app: node-exporter 44 | -------------------------------------------------------------------------------- /prometheus/prometheus-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus-config 5 | namespace: kube-ops 6 | data: 7 | prometheus.yml: | 8 | global: 9 | scrape_interval: 30s 10 | scrape_timeout: 30s 11 | scrape_configs: 12 | - job_name: 'prometheus' 13 | static_configs: 14 | - targets: ['localhost:9090'] 15 | 16 | - job_name: 'kubernetes-apiservers' 17 | scheme: https 18 | kubernetes_sd_configs: 19 | - role: endpoints 20 | tls_config: 21 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 22 | insecure_skip_verify: true 23 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 24 | relabel_configs: 25 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] 26 | action: keep 27 | regex: default;kubernetes;https 28 | 29 | - job_name: 'kubernetes-nodes' 30 | scheme: https 31 | tls_config: 32 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 33 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 34 | kubernetes_sd_configs: 35 | - role: node 36 | relabel_configs: 37 | - action: labelmap 38 | regex: __meta_kubernetes_node_label_(.+) 39 | - target_label: __address__ 40 | replacement: kubernetes.default.svc:443 41 | - source_labels: [__meta_kubernetes_node_name] 42 | regex: (.+) 43 | 
target_label: __metrics_path__ 44 | replacement: /api/v1/nodes/${1}/proxy/metrics 45 | 46 | - job_name: 'kubernetes-cadvisor' 47 | scheme: https 48 | tls_config: 49 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 50 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 51 | kubernetes_sd_configs: 52 | - role: node 53 | relabel_configs: 54 | - action: labelmap 55 | regex: __meta_kubernetes_node_label_(.+) 56 | - target_label: __address__ 57 | replacement: kubernetes.default.svc:443 58 | - source_labels: [__meta_kubernetes_node_name] 59 | regex: (.+) 60 | target_label: __metrics_path__ 61 | replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor 62 | 63 | - job_name: 'kubernetes-node-exporter' 64 | scheme: http 65 | tls_config: 66 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 67 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 68 | kubernetes_sd_configs: 69 | - role: node 70 | relabel_configs: 71 | - action: labelmap 72 | regex: __meta_kubernetes_node_label_(.+) 73 | - source_labels: [__meta_kubernetes_role] 74 | action: replace 75 | target_label: kubernetes_role 76 | - source_labels: [__address__] 77 | regex: '(.*):10250' 78 | replacement: '${1}:31009' 79 | target_label: __address__ 80 | -------------------------------------------------------------------------------- /prometheus/prometheus-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: kube-ops 6 | labels: 7 | app: prometheus 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | app: prometheus 13 | spec: 14 | serviceAccountName: prometheus 15 | containers: 16 | - image: prom/prometheus:v2.4.3 17 | name: prometheus 18 | command: 19 | - "/bin/prometheus" 20 | args: 21 | - "--config.file=/etc/prometheus/prometheus.yml" 22 | - "--storage.tsdb.path=/prometheus" 23 | - "--storage.tsdb.retention=24h" 24 | - "--web.enable-admin-api" # enables the admin HTTP API, which includes features such as deleting time series 25 | - "--web.enable-lifecycle" # enables hot reload; hitting localhost:9090/-/reload applies the new config immediately 26 | ports: 27 | - containerPort: 9090 28 | protocol: TCP 29 | name: http 30 | volumeMounts: 31 | - mountPath: "/prometheus" 32 | subPath: prometheus 33 | name: data 34 | - mountPath: "/etc/prometheus" 35 | name: config-volume 36 | resources: 37 | requests: 38 | cpu: 100m 39 | memory: 100Mi 40 | limits: 41 | cpu: 200m 42 | memory: 1Gi 43 | securityContext: 44 | runAsUser: 0 45 | volumes: 46 | - name: data 47 | persistentVolumeClaim: 48 | claimName: prometheus 49 | - configMap: 50 | name: prometheus-config 51 | name: config-volume 52 | 53 | -------------------------------------------------------------------------------- /prometheus/prometheus-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: prometheus 5 | spec: 6 | capacity: 7 | storage: 10Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | server: 10.151.30.57 13 | path: /data/k8s 14 | 15 | --- 16 | apiVersion: v1 17 | kind: PersistentVolumeClaim 18 | metadata: 19 | name: prometheus 20 | namespace: kube-ops 21 | spec: 22 | accessModes: 23 | - ReadWriteOnce 24 | resources: 25 | requests: 26 | storage: 10Gi 27 | -------------------------------------------------------------------------------- /prometheus/prometheus-rbac.yaml:
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: kube-ops 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | name: prometheus 12 | rules: 13 | - apiGroups: [""] 14 | resources: 15 | - nodes 16 | - services 17 | - endpoints 18 | - pods 19 | - nodes/proxy 20 | verbs: ["get", "list", "watch"] 21 | - apiGroups: [""] 22 | resources: 23 | - configmaps 24 | verbs: ["get"] 25 | - nonResourceURLs: ["/metrics"] # allow get on the non-resource /metrics endpoint 26 | verbs: ["get"] 27 | 28 | --- 29 | apiVersion: rbac.authorization.k8s.io/v1beta1 30 | kind: ClusterRoleBinding 31 | metadata: 32 | name: prometheus 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: prometheus 37 | subjects: 38 | - kind: ServiceAccount 39 | name: prometheus 40 | namespace: kube-ops 41 | -------------------------------------------------------------------------------- /prometheus/prometheus-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: kube-ops 6 | labels: 7 | app: prometheus 8 | spec: 9 | selector: 10 | app: prometheus 11 | type: NodePort 12 | ports: 13 | - name: web 14 | port: 9090 15 | targetPort: http 16 | -------------------------------------------------------------------------------- /pvdemo/nfs-pvc-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nfs-pvc-deploy 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nfs-pvc 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx:1.7.9 15 | imagePullPolicy: IfNotPresent 16 | ports: 17 | - name: web 18 | containerPort: 80 19 | volumeMounts: 20 | - name: www 21 | subPath: nginxpvc-test 22 | mountPath: /usr/share/nginx/html 23 | volumes: 24 | - name: www 25 | persistentVolumeClaim: 26 | claimName: pvc2-nfs 27 | 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: nfs-pvc 33 | labels: 34 | app: nfs-pvc-svc 35 | spec: 36 | type: NodePort 37 | ports: 38 | - port: 80 39 | targetPort: web 40 | selector: 41 | app: nfs-pvc -------------------------------------------------------------------------------- /pvdemo/pv-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-nfs 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | server: 10.151.30.57 13 | path: /data/k8s 14 | -------------------------------------------------------------------------------- /pvdemo/pv2-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv2-nfs 5 | labels: 6 | app: nfs 7 | spec: 8 | capacity: 9 | storage: 2Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | persistentVolumeReclaimPolicy: Recycle 13 | nfs: 14 | server: 10.151.30.57 15 | path: /data/k8s 16 | -------------------------------------------------------------------------------- /pvdemo/pvc-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-nfs 5
| spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi -------------------------------------------------------------------------------- /pvdemo/pvc2-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc2-nfs 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 2Gi 11 | selector: 12 | matchLabels: 13 | app: nfs -------------------------------------------------------------------------------- /pydemo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.6-alpine 2 | ADD . /code 3 | WORKDIR /code 4 | RUN pip install redis flask 5 | CMD ["python", "app.py"] 6 | -------------------------------------------------------------------------------- /pydemo/app.py: -------------------------------------------------------------------------------- 1 | import time 2 | import redis 3 | from flask import Flask 4 | 5 | app = Flask(__name__) 6 | cache = redis.Redis(host='redis', port=6379) 7 | 8 | 9 | def get_count(): 10 | retries = 5 11 | while True: 12 | try: 13 | return cache.incr('hits') 14 | except redis.exceptions.ConnectionError as exc: 15 | if retries == 0: 16 | raise exc 17 | retries -= 1 18 | time.sleep(0.3) 19 | 20 | 21 | @app.route('/') 22 | def hello(): 23 | cnt = get_count() 24 | return 'Hello World! cnt={}\n'.format(cnt) 25 | 26 | 27 | if __name__ == '__main__': 28 | app.run(host='0.0.0.0', debug=True) 29 | -------------------------------------------------------------------------------- /pydemo/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | web: 4 | build: . 
5 | ports: 6 | - "5000:5000" 7 | volumes: 8 | - .:/code 9 | redis: 10 | image: "redis:alpine" -------------------------------------------------------------------------------- /rbacdemo/haimaxy-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: haimaxy-role 5 | namespace: kube-system 6 | rules: 7 | - apiGroups: ["", "apps", "extensions"] 8 | resources: ["pods", "deployments", "replicasets"] 9 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 10 | -------------------------------------------------------------------------------- /rbacdemo/haimaxy-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: haimaxy-rolebinding 5 | namespace: kube-system 6 | subjects: 7 | - kind: User 8 | name: haimaxy 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: haimaxy-role 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /rbacdemo/haimaxy-sa-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: haimaxy-sa2 5 | namespace: kube-system 6 | 7 | --- 8 | 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRoleBinding 11 | metadata: 12 | name: haimaxy-sa2-clusterrolebinding 13 | subjects: 14 | - kind: ServiceAccount 15 | name: haimaxy-sa2 16 | namespace: kube-system 17 | roleRef: 18 | kind: ClusterRole 19 | name: cluster-admin 20 | apiGroup: rbac.authorization.k8s.io 21 | -------------------------------------------------------------------------------- /rbacdemo/haimaxy-sa-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: haimaxy-sa-role 5 | namespace: kube-system 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["get", "list"] 10 | - apiGroups: ["apps", "extensions"] 11 | resources: ["deployments", "replicasets"] 12 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 13 | -------------------------------------------------------------------------------- /rbacdemo/haimaxy-sa-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: haimaxy-sa-rolebinding 5 | namespace: kube-system 6 | subjects: 7 | - kind: ServiceAccount 8 | name: haimaxy-sa 9 | namespace: kube-system 10 | roleRef: 11 | kind: Role 12 | name: haimaxy-sa-role 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /rcdemo/rc-demo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ReplicationController 4 | metadata: 5 | name: rc-demo 6 | labels: 7 | app: rc 8 | spec: 9 | replicas: 3 10 | template: 11 | metadata: 12 | labels: 13 | app: rc 14 | spec: 15 | containers: 16 | - name: nginx-demo 17 | image: nginx 18 | ports: 19 | - containerPort: 80 20 | -------------------------------------------------------------------------------- /scheduler/node-affinity-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion:
apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: affinity 5 | labels: 6 | app: affinity 7 | spec: 8 | replicas: 3 9 | revisionHistoryLimit: 10 10 | template: 11 | metadata: 12 | labels: 13 | app: affinity 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.7.9 18 | ports: 19 | - name: http 20 | containerPort: 80 21 | affinity: 22 | nodeAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | nodeSelectorTerms: 25 | - matchExpressions: 26 | - key: kubernetes.io/hostname 27 | operator: NotIn 28 | values: 29 | - node03 30 | preferredDuringSchedulingIgnoredDuringExecution: 31 | - weight: 1 32 | preference: 33 | matchExpressions: 34 | - key: com 35 | operator: In 36 | values: 37 | - youdianzhishi 38 | 39 | -------------------------------------------------------------------------------- /scheduler/node-selector-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | app: busybox-pod 6 | name: test-busybox 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: busybox 13 | name: test-busybox 14 | nodeSelector: 15 | com: youdianzhishi 16 | -------------------------------------------------------------------------------- /scheduler/pod-affinity-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: affinity 5 | labels: 6 | app: affinity 7 | spec: 8 | replicas: 3 9 | revisionHistoryLimit: 10 10 | template: 11 | metadata: 12 | labels: 13 | app: affinity 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.7.9 18 | ports: 19 | - name: http 20 | containerPort: 80 21 | affinity: 22 | podAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | - labelSelector: 25 | matchExpressions: 26 | - key: app 27 | operator: In 28 | values: 29 | - busybox-pod 30 | topologyKey: kubernetes.io/hostname 31 | -------------------------------------------------------------------------------- /scheduler/pod-antiaffinity-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: affinity 5 | labels: 6 | app: affinity 7 | spec: 8 | replicas: 3 9 | revisionHistoryLimit: 10 10 | template: 11 | metadata: 12 | labels: 13 | app: affinity 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.7.9 18 | ports: 19 | - name: http 20 | containerPort: 80 21 | affinity: 22 | podAntiAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | - labelSelector: 25 | matchExpressions: 26 | - key: app 27 | operator: In 28 | values: 29 | - busybox-pod 30 | topologyKey: kubernetes.io/hostname 31 | -------------------------------------------------------------------------------- /secretdemo/secret-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mysecret 5 | type: Opaque 6 | data: 7 | username: YWRtaW4= 8 | password: YWRtaW4zMjE= 9 | -------------------------------------------------------------------------------- /secretdemo/secret-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: foo 5 | spec: 6 | containers: 7 | - name: foo 8 | image: 192.168.1.100:5000/test:v1 9 | imagePullSecrets: 10 | - name: myregistry
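Note: secret-pod.yaml above references an image pull secret named myregistry that is not defined anywhere in this repo. A minimal sketch of creating it with kubectl (the registry address matches the image above; the username and password are placeholders):

kubectl create secret docker-registry myregistry \
  --docker-server=192.168.1.100:5000 \
  --docker-username=<username> \
  --docker-password=<password>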
-------------------------------------------------------------------------------- /secretdemo/secret1-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secret1-pod 5 | spec: 6 | containers: 7 | - name: secret1 8 | image: busybox 9 | command: ["/bin/sh", "-c", "env"] 10 | env: 11 | - name: USERNAME 12 | valueFrom: 13 | secretKeyRef: 14 | name: mysecret 15 | key: username 16 | - name: PASSWORD 17 | valueFrom: 18 | secretKeyRef: 19 | name: mysecret 20 | key: password -------------------------------------------------------------------------------- /secretdemo/secret2-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secret2-pod 5 | spec: 6 | containers: 7 | - name: secret2 8 | image: busybox 9 | command: ["/bin/sh", "-c", "ls /etc/secrets"] 10 | volumeMounts: 11 | - name: secrets 12 | mountPath: /etc/secrets 13 | volumes: 14 | - name: secrets 15 | secret: 16 | secretName: mysecret 17 | -------------------------------------------------------------------------------- /servicedemo/service-demo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: myservice 6 | spec: 7 | selector: 8 | environment: production # Service selectors only support equality-based matching; a set-based expression like "in (production, qa)" is not valid here 9 | type: NodePort 10 | ports: 11 | - name: mynginx-http 12 | protocol: TCP 13 | port: 80 14 | targetPort: nginxweb 15 | -------------------------------------------------------------------------------- /statefulsetdemo/pv001.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv001 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | hostPath: 12 | path: /tmp/data 13 | 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolume 17 | metadata: 18 | name: pv002 19 | spec: 20 | capacity: 21 | storage: 1Gi 22 | accessModes: 23 | - ReadWriteOnce 24 | persistentVolumeReclaimPolicy: Recycle 25 | hostPath: 26 | path: /tmp/data 27 | -------------------------------------------------------------------------------- /statefulsetdemo/statefulset-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx 5 | spec: 6 | ports: 7 | - port: 80 8 | name: web 9 | clusterIP: None 10 | selector: 11 | app: nginx 12 | role: stateful 13 | 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: web 19 | spec: 20 | serviceName: "nginx" 21 | replicas: 2 22 | selector: 23 | matchLabels: 24 | app: nginx 25 | role: stateful 26 | template: 27 | metadata: 28 | labels: 29 | app: nginx 30 | role: stateful 31 | spec: 32 | containers: 33 | - name: nginx 34 | image: cnych/nginx-slim:0.8 35 | ports: 36 | - containerPort: 80 37 | name: web 38 | volumeMounts: 39 | - name: www 40 | mountPath: /usr/share/nginx/html 41 | volumeClaimTemplates: 42 | - metadata: 43 | name: www 44 | spec: 45 | accessModes: ["ReadWriteOnce"] 46 | resources: 47 | requests: 48 | storage: 1Gi 49 | -------------------------------------------------------------------------------- /staticpod/static-pod.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: static-pod1 6 |
labels: 7 | app: static 8 | spec: 9 | containers: 10 | - name: web 11 | image: nginx 12 | ports: 13 | - name: webport 14 | containerPort: 80 15 | -------------------------------------------------------------------------------- /storageclassdemo/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: course-nfs-storage 5 | provisioner: fuseim.pri/ifs 6 | -------------------------------------------------------------------------------- /storageclassdemo/deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: nfs-client-provisioner 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: Recreate 9 | template: 10 | metadata: 11 | labels: 12 | app: nfs-client-provisioner 13 | spec: 14 | serviceAccountName: nfs-client-provisioner 15 | containers: 16 | - name: nfs-client-provisioner 17 | image: quay.io/external_storage/nfs-client-provisioner:latest 18 | volumeMounts: 19 | - name: nfs-client-root 20 | mountPath: /persistentvolumes 21 | env: 22 | - name: PROVISIONER_NAME 23 | value: fuseim.pri/ifs 24 | - name: NFS_SERVER 25 | value: 10.151.30.57 26 | - name: NFS_PATH 27 | value: /data/k8s 28 | volumes: 29 | - name: nfs-client-root 30 | nfs: 31 | server: 10.151.30.57 32 | path: /data/k8s -------------------------------------------------------------------------------- /storageclassdemo/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nfs-client-provisioner 5 | 6 | --- 7 | kind: ClusterRole 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | metadata: 10 | name: nfs-client-provisioner-runner 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["persistentvolumes"] 14 | verbs: ["get", "list", "watch", "create", "delete"] 15 | - apiGroups: [""] 16 | resources: ["persistentvolumeclaims"] 17 | verbs: ["get", "list", "watch", "update"] 18 | - apiGroups: ["storage.k8s.io"] 19 | resources: ["storageclasses"] 20 | verbs: ["get", "list", "watch"] 21 | - apiGroups: [""] 22 | resources: ["events"] 23 | verbs: ["list", "watch", "create", "update", "patch"] 24 | - apiGroups: [""] 25 | resources: ["endpoints"] 26 | verbs: ["create", "delete", "get", "list", "watch", "patch", "update"] 27 | 28 | --- 29 | kind: ClusterRoleBinding 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | metadata: 32 | name: run-nfs-client-provisioner 33 | subjects: 34 | - kind: ServiceAccount 35 | name: nfs-client-provisioner 36 | namespace: default 37 | roleRef: 38 | kind: ClusterRole 39 | name: nfs-client-provisioner-runner 40 | apiGroup: rbac.authorization.k8s.io 41 | -------------------------------------------------------------------------------- /storageclassdemo/test-pod.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: test-pod 5 | spec: 6 | containers: 7 | - name: test-pod 8 | image: busybox 9 | imagePullPolicy: IfNotPresent 10 | command: 11 | - "/bin/sh" 12 | args: 13 | - "-c" 14 | - "touch /mnt/SUCCESS && exit 0 || exit 1" 15 | volumeMounts: 16 | - name: nfs-pvc 17 | mountPath: "/mnt" 18 | restartPolicy: "Never" 19 | volumes: 20 | - name: nfs-pvc 21 | persistentVolumeClaim: 22 | claimName: test-pvc 23 | -------------------------------------------------------------------------------- 
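Note: the storageclassdemo manifests are meant to be applied together. A sketch of one possible order (the RBAC objects and the nfs-client provisioner first, then the StorageClass, then the test PVC in the next file and the test Pod above), assuming kubectl is pointed at the right cluster:

kubectl apply -f storageclassdemo/rbac.yaml
kubectl apply -f storageclassdemo/deployment.yaml
kubectl apply -f storageclassdemo/class.yaml
kubectl apply -f storageclassdemo/test-pvc.yaml
kubectl apply -f storageclassdemo/test-pod.yaml
kubectl get pvc test-pvc   # should reach STATUS Bound once dynamic provisioning works
kubectl get pod test-pod   # should reach Completed after it touches /mnt/SUCCESS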
/storageclassdemo/test-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: test-pvc 5 | annotations: 6 | volume.beta.kubernetes.io/storage-class: "course-nfs-storage" 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 1Mi 13 | -------------------------------------------------------------------------------- /storageclassdemo/test-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: sc-demo 5 | spec: 6 | replicas: 3 7 | serviceName: "nginx" 8 | template: 9 | metadata: 10 | labels: 11 | app: sc-web 12 | spec: 13 | containers: 14 | - name: nginx 15 | image: nginx:1.7.9 16 | imagePullPolicy: IfNotPresent 17 | ports: 18 | - containerPort: 80 19 | volumeMounts: 20 | - name: www 21 | mountPath: /usr/share/nginx/html 22 | volumeClaimTemplates: 23 | - metadata: 24 | name: www 25 | annotations: 26 | volume.beta.kubernetes.io/storage-class: "course-nfs-storage" 27 | spec: 28 | accessModes: 29 | - ReadWriteMany 30 | resources: 31 | requests: 32 | storage: 1Mi -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | while true; do wget -q -O- http://k8s.haimaxy.com:31306; done -------------------------------------------------------------------------------- /traefikdemo/example-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: example-web-app 5 | annotations: 6 | kubernetes.io/ingress.class: traefik 7 | spec: 8 | rules: 9 | - host: example.haimaxy.com 10 | http: 11 | paths: 12 | - path: /s1 13 | backend: 14 | serviceName: svc1 15 | servicePort: 8080 16 | - path: /s2 17 | backend: 18 | serviceName: svc2 19 | servicePort: 8080 20 | - path: / 21 | backend: 22 | serviceName: svc3 23 | servicePort: 8080 24 | 25 | -------------------------------------------------------------------------------- /traefikdemo/example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: svc1 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | app: svc1 10 | spec: 11 | containers: 12 | - name: svc1 13 | image: cnych/example-web-service 14 | env: 15 | - name: APP_SVC 16 | value: svc1 17 | ports: 18 | - containerPort: 8080 19 | protocol: TCP 20 | 21 | --- 22 | apiVersion: extensions/v1beta1 23 | kind: Deployment 24 | metadata: 25 | name: svc2 26 | spec: 27 | template: 28 | metadata: 29 | labels: 30 | app: svc2 31 | spec: 32 | containers: 33 | - name: svc2 34 | image: cnych/example-web-service 35 | env: 36 | - name: APP_SVC 37 | value: svc2 38 | ports: 39 | - containerPort: 8080 40 | protocol: TCP 41 | 42 | --- 43 | apiVersion: extensions/v1beta1 44 | kind: Deployment 45 | metadata: 46 | name: svc3 47 | spec: 48 | template: 49 | metadata: 50 | labels: 51 | app: svc3 52 | spec: 53 | containers: 54 | - name: svc3 55 | image: cnych/example-web-service 56 | env: 57 | - name: APP_SVC 58 | value: svc3 59 | ports: 60 | - containerPort: 8080 61 | protocol: TCP 62 | 63 | 64 | --- 65 | apiVersion: v1 66 | kind: Service 67 | metadata: 68 | labels: 69 | app: svc1 70 | name: svc1 71 | spec: 72 | ports: 73 | - name: http 74 | 
port: 8080 75 | selector: 76 | app: svc1 77 | 78 | --- 79 | apiVersion: v1 80 | kind: Service 81 | metadata: 82 | labels: 83 | app: svc2 84 | name: svc2 85 | spec: 86 | ports: 87 | - name: http 88 | port: 8080 89 | selector: 90 | app: svc2 91 | 92 | --- 93 | apiVersion: v1 94 | kind: Service 95 | metadata: 96 | labels: 97 | app: svc3 98 | name: svc3 99 | spec: 100 | ports: 101 | - name: http 102 | port: 8080 103 | selector: 104 | app: svc3 -------------------------------------------------------------------------------- /traefikdemo/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: traefik-web-ui 5 | namespace: kube-system 6 | annotations: 7 | kubernetes.io/ingress.class: traefik 8 | spec: 9 | rules: 10 | - host: traefik.haimaxy.com 11 | http: 12 | paths: 13 | - backend: 14 | serviceName: traefik-ingress-service 15 | servicePort: admin 16 | -------------------------------------------------------------------------------- /traefikdemo/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: traefik-ingress-controller 6 | namespace: kube-system 7 | 8 | --- 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1beta1 11 | metadata: 12 | name: traefik-ingress-controller 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - services 18 | - endpoints 19 | - secrets 20 | verbs: 21 | - get 22 | - list 23 | - watch 24 | - apiGroups: 25 | - extensions 26 | resources: 27 | - ingresses 28 | verbs: 29 | - get 30 | - list 31 | - watch 32 | --- 33 | kind: ClusterRoleBinding 34 | apiVersion: rbac.authorization.k8s.io/v1beta1 35 | metadata: 36 | name: traefik-ingress-controller 37 | roleRef: 38 | apiGroup: rbac.authorization.k8s.io 39 | kind: ClusterRole 40 | name: traefik-ingress-controller 41 | subjects: 42 | - kind: ServiceAccount 43 | name: traefik-ingress-controller 44 | namespace: kube-system 45 | -------------------------------------------------------------------------------- /traefikdemo/traefik.toml: -------------------------------------------------------------------------------- 1 | defaultEntryPoints = ['http', 'https'] 2 | 3 | [entryPoints] 4 | [entryPoints.http] 5 | address = ":80" 6 | [entryPoints.http.redirect] 7 | entryPoint = 'https' 8 | [entryPoints.https] 9 | address = ":443" 10 | [entryPoints.https.tls] 11 | [[entryPoints.https.tls.certificates]] 12 | CertFile = "/ssl/tls.crt" 13 | KeyFile = "/ssl/tls.key" 14 | -------------------------------------------------------------------------------- /traefikdemo/traefik.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | name: traefik-ingress-controller 6 | namespace: kube-system 7 | labels: 8 | k8s-app: traefik-ingress-lb 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | k8s-app: traefik-ingress-lb 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: traefik-ingress-lb 18 | name: traefik-ingress-lb 19 | spec: 20 | serviceAccountName: traefik-ingress-controller 21 | terminationGracePeriodSeconds: 60 22 | volumes: 23 | - name: ssl 24 | secret: 25 | secretName: traefik-cert 26 | - name: config 27 | configMap: 28 | name: traefik-conf 29 | tolerations: 30 | - operator: "Exists" 31 | nodeSelector: 32 | kubernetes.io/hostname: master 33 | containers: 34 | - image: 
traefik 35 | name: traefik-ingress-lb 36 | volumeMounts: 37 | - mountPath: "/ssl" 38 | name: ssl 39 | - mountPath: "/config" 40 | name: config 41 | ports: 42 | - name: http 43 | containerPort: 80 44 | hostPort: 80 45 | - name: https 46 | containerPort: 443 47 | hostPort: 443 48 | - name: admin 49 | containerPort: 8080 50 | args: 51 | - --configfile=/config/traefik.toml 52 | - --api 53 | - --kubernetes 54 | - --logLevel=INFO 55 | --- 56 | kind: Service 57 | apiVersion: v1 58 | metadata: 59 | name: traefik-ingress-service 60 | namespace: kube-system 61 | spec: 62 | selector: 63 | k8s-app: traefik-ingress-lb 64 | ports: 65 | - protocol: TCP 66 | port: 80 67 | name: web 68 | - protocol: TCP 69 | port: 8080 70 | name: admin 71 | type: NodePort 72 | -------------------------------------------------------------------------------- /wordpress/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | wordpress: 5 | image: wordpress 6 | ports: 7 | - 80:80 8 | networks: 9 | - overlay 10 | environment: 11 | WORDPRESS_DB_HOST: db:3306 12 | WORDPRESS_DB_USER: wordpress 13 | WORDPRESS_DB_PASSWORD: wordpress 14 | deploy: 15 | mode: replicated 16 | replicas: 3 17 | 18 | db: 19 | image: mysql:5.7 20 | networks: 21 | - overlay 22 | volumes: 23 | - db-data:/var/lib/mysql 24 | environment: 25 | MYSQL_ROOT_PASSWORD: your-mysql-root-password 26 | MYSQL_DATABASE: wordpress 27 | MYSQL_USER: wordpress 28 | MYSQL_PASSWORD: wordpress 29 | deploy: 30 | placement: 31 | constraints: [node.role == manager] 32 | 33 | visualizer: 34 | image: dockersamples/visualizer:stable 35 | ports: 36 | - "8080:8080" 37 | stop_grace_period: 1m30s 38 | volumes: 39 | - "/var/run/docker.sock:/var/run/docker.sock" 40 | deploy: 41 | placement: 42 | constraints: [node.role == manager] 43 | 44 | volumes: 45 | db-data: 46 | networks: 47 | overlay: 48 | -------------------------------------------------------------------------------- /wordpress/wordpress-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysql 5 | namespace: blog 6 | labels: 7 | app: mysql 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: mysql 12 | template: 13 | metadata: 14 | labels: 15 | app: mysql 16 | spec: 17 | containers: 18 | - name: mysql 19 | image: mysql:5.7 20 | args: 21 | - --default_authentication_plugin=mysql_native_password 22 | - --character-set-server=utf8mb4 23 | - --collation-server=utf8mb4_unicode_ci 24 | ports: 25 | - containerPort: 3306 26 | name: dbport 27 | env: 28 | - name: MYSQL_ROOT_PASSWORD 29 | value: rootPassW0rd 30 | - name: MYSQL_DATABASE 31 | value: wordpress 32 | - name: MYSQL_USER 33 | value: wordpress 34 | - name: MYSQL_PASSWORD 35 | value: wordpress 36 | volumeMounts: 37 | - name: db 38 | mountPath: /var/lib/mysql 39 | volumes: 40 | - name: db 41 | hostPath: 42 | path: /var/lib/mysql 43 | 44 | --- 45 | apiVersion: v1 46 | kind: Service 47 | metadata: 48 | name: mysql 49 | namespace: blog 50 | spec: 51 | selector: 52 | app: mysql 53 | ports: 54 | - name: mysqlport 55 | protocol: TCP 56 | port: 3306 57 | targetPort: dbport 58 | 59 | --- 60 | apiVersion: apps/v1 61 | kind: Deployment 62 | metadata: 63 | name: wordpress 64 | namespace: blog 65 | labels: 66 | app: wordpress 67 | spec: 68 | selector: 69 | matchLabels: 70 | app: wordpress 71 | minReadySeconds: 5 72 | strategy: 73 | type: RollingUpdate 74 | rollingUpdate: 75 | maxSurge: 1 76 | maxUnavailable: 1 
77 | template: 78 | metadata: 79 | labels: 80 | app: wordpress 81 | spec: 82 | initContainers: 83 | - name: init-db 84 | image: busybox 85 | command: ['sh', '-c', 'until nslookup mysql; do echo waiting for mysql service; sleep 2; done;'] 86 | containers: 87 | - name: wordpress 88 | image: wordpress 89 | imagePullPolicy: IfNotPresent 90 | ports: 91 | - containerPort: 80 92 | name: wdport 93 | env: 94 | - name: WORDPRESS_DB_HOST 95 | value: mysql:3306 96 | - name: WORDPRESS_DB_USER 97 | value: wordpress 98 | - name: WORDPRESS_DB_PASSWORD 99 | value: wordpress 100 | readinessProbe: 101 | tcpSocket: 102 | port: 80 103 | initialDelaySeconds: 5 104 | periodSeconds: 10 105 | resources: 106 | limits: 107 | cpu: 200m 108 | memory: 256Mi 109 | requests: 110 | cpu: 100m 111 | memory: 100Mi 112 | 113 | --- 114 | apiVersion: v1 115 | kind: Service 116 | metadata: 117 | name: wordpress 118 | namespace: blog 119 | spec: 120 | selector: 121 | app: wordpress 122 | type: NodePort 123 | ports: 124 | - name: wordpressport 125 | protocol: TCP 126 | port: 80 127 | nodePort: 32255 128 | targetPort: wdport -------------------------------------------------------------------------------- /wordpress/wordpress-db.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysql-deploy 5 | namespace: blog 6 | labels: 7 | app: mysql 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: mysql 12 | template: 13 | metadata: 14 | labels: 15 | app: mysql 16 | spec: 17 | containers: 18 | - name: mysql 19 | image: mysql:5.7 20 | imagePullPolicy: IfNotPresent 21 | args: 22 | - --default_authentication_plugin=mysql_native_password 23 | - --character-set-server=utf8mb4 24 | - --collation-server=utf8mb4_unicode_ci 25 | ports: 26 | - containerPort: 3306 27 | name: dbport 28 | env: 29 | - name: MYSQL_ROOT_PASSWORD 30 | value: rootPassw0rd 31 | - name: MYSQL_DATABASE 32 | value: wordpress 33 | - name: MYSQL_USER 34 | value: wordpress 35 | - name: MYSQL_PASSWORD 36 | value: wordpress 37 | volumeMounts: 38 | - name: db 39 | mountPath: /var/lib/mysql 40 | volumes: 41 | - name: db 42 | hostPath: 43 | path: /var/lib/mysql 44 | 45 | --- 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: mysql 50 | namespace: blog 51 | spec: 52 | selector: 53 | app: mysql 54 | ports: 55 | - name: mysqlport 56 | protocol: TCP 57 | port: 3306 58 | targetPort: dbport -------------------------------------------------------------------------------- /wordpress/wordpress-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: wordpress 5 | namespace: blog 6 | spec: 7 | containers: 8 | - name: wordpress 9 | image: wordpress 10 | imagePullPolicy: IfNotPresent 11 | ports: 12 | - containerPort: 80 13 | name: wdport 14 | env: 15 | - name: WORDPRESS_DB_HOST 16 | value: localhost:3306 17 | - name: WORDPRESS_DB_USER 18 | value: wordpress 19 | - name: WORDPRESS_DB_PASSWORD 20 | value: wordpress 21 | - name: mysql 22 | image: mysql:5.7 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - containerPort: 3306 26 | name: dbport 27 | env: 28 | - name: MYSQL_ROOT_PASSWORD 29 | value: rootPassw0rd 30 | - name: MYSQL_DATABASE 31 | value: wordpress 32 | - name: MYSQL_USER 33 | value: wordpress 34 | - name: MYSQL_PASSWORD 35 | value: wordpress 36 | volumeMounts: 37 | - name: db 38 | mountPath: /var/lib/mysql 39 | volumes: 40 | - name: db 41 | hostPath: 42 | path: /var/lib/mysql 43 |
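Note: wordpress-pod.yaml above runs WordPress and MySQL as two containers in a single Pod, which is why WORDPRESS_DB_HOST points at localhost:3306; wordpress-db.yaml and wordpress.yaml split the same stack into separate Deployments that talk over the mysql Service. All of these manifests assume the blog namespace already exists; a minimal sketch for the single-Pod variant:

kubectl create namespace blog
kubectl apply -f wordpress/wordpress-pod.yaml
kubectl get pods -n blog -w   # wait for the Pod to report 2/2 Running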
-------------------------------------------------------------------------------- /wordpress/wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: wordpress-deploy 5 | namespace: blog 6 | labels: 7 | app: wordpress 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: wordpress 12 | strategy: 13 | type: RollingUpdate 14 | rollingUpdate: 15 | maxSurge: 1 16 | maxUnavailable: 1 17 | template: 18 | metadata: 19 | labels: 20 | app: wordpress 21 | spec: 22 | initContainers: 23 | - name: init-db 24 | image: busybox 25 | imagePullPolicy: IfNotPresent 26 | command: ['sh', '-c', 'until nslookup mysql;do echo waiting for mysql service; sleep 2; done;'] 27 | containers: 28 | - name: wordpress 29 | image: wordpress 30 | imagePullPolicy: IfNotPresent 31 | ports: 32 | - name: wdport 33 | containerPort: 80 34 | livenessProbe: 35 | tcpSocket: 36 | port: 80 37 | initialDelaySeconds: 10 38 | periodSeconds: 3 39 | readinessProbe: 40 | tcpSocket: 41 | port: 80 42 | initialDelaySeconds: 15 43 | periodSeconds: 10 44 | resources: 45 | limits: 46 | cpu: 200m 47 | memory: 200Mi 48 | requests: 49 | cpu: 100m 50 | memory: 100Mi 51 | env: 52 | - name: WORDPRESS_DB_HOST 53 | value: mysql:3306 54 | - name: WORDPRESS_DB_USER 55 | value: wordpress 56 | - name: WORDPRESS_DB_PASSWORD 57 | value: wordpress 58 | 59 | --- 60 | apiVersion: v1 61 | kind: Service 62 | metadata: 63 | name: wordpress 64 | namespace: blog 65 | spec: 66 | selector: 67 | app: wordpress 68 | type: NodePort 69 | ports: 70 | - name: wordpressport 71 | protocol: TCP 72 | port: 80 73 | targetPort: wdport -------------------------------------------------------------------------------- /yaml/pod-example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: haimaxy-pod 6 | labels: 7 | app: web 8 | spec: 9 | containers: 10 | - name: frontend 11 | image: nginx 12 | ports: 13 | - containerPort: 80 14 | 15 | # Pod example yaml 16 | -------------------------------------------------------------------------------- /yaml/test1.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod" 4 | } -------------------------------------------------------------------------------- /yaml/test1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | -------------------------------------------------------------------------------- /yaml/test2.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "haimaxy-pod", 6 | "labels": { 7 | "app": "web" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /yaml/test2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: haimaxy-pod 6 | labels: 7 | app: web 8 | -------------------------------------------------------------------------------- /yaml/test3.json: -------------------------------------------------------------------------------- 1 | { 2 | "args": ["a", "b", "c", "d"] 3 | } -------------------------------------------------------------------------------- /yaml/test3.yml:
-------------------------------------------------------------------------------- 1 | args: 2 | - a 3 | - b 4 | - c 5 | - d 6 | -------------------------------------------------------------------------------- /yaml/test4.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "haimaxy-pod", 6 | "labels": { 7 | "app": "web" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "name": "frontend", 14 | "image": "nginx", 15 | "ports": [ 16 | {"containerPort": 80} 17 | ] 18 | }, 19 | { 20 | "name": "demo", 21 | "image": "demo", 22 | "ports": [ 23 | {"containerPort": 5000} 24 | ] 25 | } 26 | ] 27 | } 28 | } -------------------------------------------------------------------------------- /yaml/test4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: haimaxy-pod 6 | labels: 7 | app: web 8 | spec: 9 | containers: 10 | - name: frontend 11 | image: nginx 12 | ports: 13 | - containerPort: 80 14 | 15 | - name: demo 16 | image: demo 17 | ports: 18 | - containerPort: 5000 19 | --------------------------------------------------------------------------------
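Note: the testN.json / testN.yaml pairs under yaml/ express the same objects in two serializations. One way to verify the equivalence (a sketch; assumes a Python interpreter with PyYAML installed) is to render both with sorted keys and compare the output:

python -c "import json, yaml; print(json.dumps(yaml.safe_load(open('yaml/test4.yaml')), indent=2, sort_keys=True))"
python -c "import json; print(json.dumps(json.load(open('yaml/test4.json')), indent=2, sort_keys=True))"

The two commands should print identical JSON.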