├── .circleci
└── config.yml
├── .gitconfig
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
└── stale.yml
├── .gitignore
├── BreezeManual-CN.md
├── Dockerfile
├── LICENSE
├── README-CN.md
├── README.md
├── TroubleShooting-CN.md
├── callback_plugins
└── log_back.py
├── components_order.conf
├── crio-playbook
└── version
│ ├── ansible.cfg
│ ├── crio.ansible
│ ├── file
│ └── README.md
│ ├── group_vars
│ └── README.md
│ ├── inherent.yaml
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ ├── scripts
│ ├── check_crio.sh
│ ├── check_environment.sh
│ ├── fix-crio-bug.sh
│ └── fix-ubuntu-docker-warning.sh
│ ├── template
│ ├── chrony
│ │ ├── redhat-centos
│ │ │ ├── chrony-client.conf.j2
│ │ │ └── chrony-server.conf.j2
│ │ ├── ubuntu20
│ │ │ ├── chrony-client.conf.j2
│ │ │ └── chrony-server.conf.j2
│ │ └── ubuntu22
│ │ │ ├── chrony-client.conf.j2
│ │ │ └── chrony-server.conf.j2
│ ├── ipvs.conf.j2
│ ├── registries.conf.https.j2
│ ├── registries.conf.j2
│ ├── wise2c-centos8.repo.j2
│ ├── wise2c-centos9.repo.j2
│ ├── wise2c.list-ubuntu20.j2
│ └── wise2c.list-ubuntu22.j2
│ └── yat
│ ├── hosts.gotmpl
│ └── hosts.yml.gotmpl
├── docker-compose-aliyun.yml
├── docker-compose-centos-aliyun.yml
├── docker-compose-centos.yml
├── docker-compose-ubuntu-aliyun.yml
├── docker-compose-ubuntu.yml
├── docker-compose.yml
├── docker-playbook
└── version
│ ├── ansible.cfg
│ ├── docker.ansible
│ ├── file
│ └── README.md
│ ├── group_vars
│ └── README.md
│ ├── inherent.yaml
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ ├── scripts
│ ├── check_docker.sh
│ ├── check_environment.sh
│ └── fix-ubuntu-docker-warning.sh
│ ├── template
│ ├── chrony
│ │ ├── redhat-centos
│ │ │ ├── chrony-client.conf.j2
│ │ │ └── chrony-server.conf.j2
│ │ ├── ubuntu16
│ │ │ ├── chrony-client.conf.j2
│ │ │ └── chrony-server.conf.j2
│ │ └── ubuntu18
│ │ │ ├── chrony-client.conf.j2
│ │ │ └── chrony-server.conf.j2
│ ├── daemon.json.https.j2
│ ├── daemon.json.j2
│ ├── ipvs.conf.j2
│ ├── wise2c.list-ubuntu20.j2
│ ├── wise2c.list-ubuntu22.j2
│ └── wise2c.repo.j2
│ └── yat
│ ├── hosts.gotmpl
│ └── hosts.yml.gotmpl
├── elasticcloud-playbook
└── version
│ ├── ansible.cfg
│ ├── file
│ ├── README.md
│ ├── deploy.sh
│ └── remove.sh
│ ├── group_vars
│ ├── README.md
│ └── elasticcloud.yml
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ ├── template
│ ├── eck.yml.j2
│ ├── elasticsearch-service.yml.j2
│ ├── elasticsearch.yml.j2
│ ├── filebeat.yml.j2
│ ├── fluentd.yml.j2
│ ├── kibana-service.yml.j2
│ └── kibana.yml.j2
│ └── yat
│ ├── README.md
│ ├── all.yml.gotmpl
│ └── hosts.gotmpl
├── etcd-playbook
└── version-by-kubeadm
│ ├── ansible.cfg
│ ├── file
│ ├── README.md
│ ├── backup-etcd-database-folder.sh
│ ├── ca-config.json
│ ├── ca-csr.json
│ ├── etcd-csr.json
│ ├── etcd-healthcheck.sh
│ ├── generate-etcd-certificates.sh
│ └── make-etcd-snapshot.sh
│ ├── group_vars
│ └── etcd.yml
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ └── yat
│ ├── all.yml.gotmpl
│ └── hosts.gotmpl
├── harbor-playbook
├── external
│ ├── ansible.cfg
│ ├── file
│ │ └── README.md
│ ├── group_vars
│ │ └── README.md
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ ├── template
│ │ └── README.md
│ └── yat
│ │ ├── harbor.yml.gotmpl
│ │ └── hosts.gotmpl
└── version
│ ├── ansible.cfg
│ ├── file
│ ├── README.md
│ ├── install-harbor.sh
│ ├── start-harbor.sh
│ └── stop-harbor.sh
│ ├── group_vars
│ └── README.md
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ ├── template
│ └── README.md
│ └── yat
│ ├── harbor.yml.gotmpl
│ └── hosts.gotmpl
├── init.sh
├── istio-playbook
└── version
│ ├── ansible.cfg
│ ├── file
│ ├── README.md
│ ├── clean-images-tags.sh
│ ├── deploy.sh
│ ├── harbor-address.txt
│ └── remove.sh
│ ├── group_vars
│ ├── README.md
│ └── istio.yml
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── properties.json
│ ├── reset.ansible
│ ├── template
│ ├── README.md
│ ├── grafana-service.yaml.j2
│ ├── jaeger-service.yaml.j2
│ ├── kiali-service.yaml.j2
│ └── prometheus-service.yaml.j2
│ └── yat
│ ├── all.yml.gotmpl
│ └── hosts.gotmpl
├── kubernetes-playbook
└── version
│ ├── ansible.cfg
│ ├── both.ansible
│ ├── copy-upgrade-scripts.ansible
│ ├── file
│ ├── admin-csr.json
│ ├── apiserver-csr.json
│ ├── ca-config.json
│ ├── ca-csr.json
│ ├── controller-manager-csr.json
│ ├── front-proxy-ca-csr.json
│ ├── front-proxy-client-csr.json
│ ├── generate-apiserver-certificate.sh
│ ├── generate-other-certificates.sh
│ ├── ipcalc-0.41.tar.gz
│ ├── ipcalc-url.txt
│ ├── kube-proxy-ds-patch.yaml
│ ├── kubeadm-generate-join-command.sh
│ ├── kubeadm-token.sh
│ ├── kubelet-csr.json
│ ├── metrics-server
│ │ ├── aggregated-metrics-reader.yaml
│ │ ├── auth-delegator.yaml
│ │ ├── auth-reader.yaml
│ │ ├── metrics-apiservice.yaml
│ │ ├── metrics-server-service.yaml
│ │ └── resource-reader.yaml
│ ├── patch-kubeconfig.sh
│ ├── patch-kubelet-conf.sh
│ ├── prometheus-fix-master-nodes-ubuntu.sh
│ ├── prometheus-fix-master-nodes.sh
│ ├── prometheus-fix-worker-nodes-ubuntu.sh
│ ├── prometheus-fix-worker-nodes.sh
│ ├── scheduler-csr.json
│ └── update-api-advertise-address.sh
│ ├── group_vars
│ └── README.md
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── master-node.ansible
│ ├── properties.json
│ ├── push-images.ansible
│ ├── reset.ansible
│ ├── scripts
│ ├── check_kubelet_kubeproxy.sh
│ ├── disable-swap.sh
│ └── taint.sh
│ ├── template
│ ├── kubeadm.conf.j2
│ ├── kubelet.conf.j2
│ ├── kubernetes-dashboard-svc.yml.j2
│ ├── max-scale.yml.j2
│ ├── metrics-server-deployment.yaml.j2
│ ├── service_cidr.txt.j2
│ └── upgrade
│ │ ├── redhat-centos
│ │ ├── upgrade_first_master_node.sh.j2
│ │ ├── upgrade_other_master_nodes.sh.j2
│ │ └── upgrade_worker_nodes.sh.j2
│ │ └── ubuntu
│ │ ├── upgrade_first_master_node.sh.j2
│ │ ├── upgrade_other_master_nodes.sh.j2
│ │ └── upgrade_worker_nodes.sh.j2
│ ├── worker-node.ansible
│ └── yat
│ ├── all.yml.gotmpl
│ └── hosts.gotmpl
├── loadbalancer-playbook
└── version
│ ├── ansible.cfg
│ ├── file
│ ├── README.md
│ └── keepalived.sh
│ ├── group_vars
│ └── README.md
│ ├── inherent.yaml
│ ├── init.sh
│ ├── install.ansible
│ ├── keepalived
│ ├── Dockerfile.aarch64
│ ├── Dockerfile.amd64
│ ├── keepalived.conf
│ └── keepalived.sh
│ ├── properties.json
│ ├── reset.ansible
│ ├── template
│ ├── haproxy.cfg.j2
│ ├── keepalive.cfg.j2
│ └── readme.txt
│ └── yat
│ ├── all.yml.gotmpl
│ ├── hosts.gotmpl
│ └── hosts.yml.gotmpl
├── manual
├── Add-Istio-001.png
├── Add-Istio-002.png
├── AddWorkerNodes.png
├── Alertmanager.png
├── Breeze-Logo.ai
├── Breeze-Logo.png
├── BreezeCNCF.png
├── BreezeLogo.png
├── BreezeManual-CN.pdf
├── BreezeManual.pdf
├── BreezeScreenShots001.png
├── BreezeScreenShots002.png
├── BreezeScreenShots003.png
├── BreezeScreenShots004.png
├── BreezeScreenShots005.png
├── BreezeScreenShots006.png
├── BreezeScreenShots007.png
├── BreezeScreenShots008.png
├── BreezeScreenShots009.png
├── BreezeScreenShots010.png
├── BreezeScreenShots011.png
├── BreezeScreenShots012.png
├── BreezeScreenShots013.png
├── BreezeScreenShots014.png
├── BreezeScreenShots015.png
├── BreezeScreenShots016.png
├── BreezeScreenShots017.png
├── BreezeScreenShots018.png
├── BreezeScreenShots019.png
├── BreezeScreenShots020.png
├── BreezeScreenShots021.png
├── BreezeScreenShots022.png
├── BreezeScreenShots023.png
├── BreezeScreenShots024.png
├── BreezeScreenShots025.png
├── BreezeScreenShots026.png
├── BreezeScreenShots027.png
├── BreezeScreenShots028.png
├── BreezeScreenShots029.png
├── BreezeScreenShots030.png
├── BreezeScreenShots031.png
├── BreezeScreenShots032.png
├── BreezeScreenShotsExternalHarbor.png
├── BreezeVersionSelect.png
├── ClusterCheck.png
├── Grafana.png
├── Install-Istio.png
├── Istio-Grafana.png
├── Istio-Jaeger.png
├── Istio-Kiali-001.png
├── Istio-Kiali-002.png
├── Istio-Prometheus.png
├── Kubernetes-HA-Breeze.png
├── KubernetesDashboard-001.png
├── KubernetesDashboard-002.png
├── KubernetesDashboard-003.png
├── KubernetesHA.png
├── Prometheus.png
├── README.md
├── SelectDockerWorkerNodes.png
├── SelectTag.png
├── Wise2C-Breeze-Architecture.png
├── With-Istio.png
├── haproxy-keepalived-001.png
├── haproxy-keepalived-002.png
└── prometheus-role.png
└── prometheus-playbook
└── version
├── ansible.cfg
├── file
├── README.md
├── add-on.sh
├── append-lines.txt
├── clean-images-tags.sh
├── coredns.yaml
├── deploy.sh
├── etcd-address.txt
├── etcd.yaml
├── harbor-address.txt
├── kube-controller-manager.yaml
├── kube-scheduler.yaml
├── node-ports.txt
└── remove.sh
├── group_vars
├── README.md
└── prometheus.yml
├── inherent.yaml
├── init.sh
├── install.ansible
├── properties.json
├── reset.ansible
├── template
├── README.md
├── alertmanager-service.yaml.j2
├── grafana-service.yaml.j2
└── prometheus-service.yaml.j2
└── yat
├── all.yml.gotmpl
└── hosts.gotmpl
/.gitconfig:
--------------------------------------------------------------------------------
1 | [user]
2 | email = peng.alan@gmail.com
3 | name = Alan Peng
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 |
5 | ---
6 |
7 | **Describe the bug**
8 | A clear and concise description of what the bug is.
9 |
10 | **To Reproduce**
11 | Steps to reproduce the behavior:
12 | 1. Go to '...'
13 | 2. Click on '....'
14 | 3. Scroll down to '....'
15 | 4. See error
16 |
17 | **Expected behavior**
18 | A clear and concise description of what you expected to happen.
19 |
20 | **Screenshots**
21 | If applicable, add screenshots to help explain your problem.
22 |
23 | **Desktop (please complete the following information):**
24 | - OS: [e.g. iOS]
25 | - Browser [e.g. chrome, safari]
26 | - Version [e.g. 22]
27 |
28 | **Smartphone (please complete the following information):**
29 | - Device: [e.g. iPhone6]
30 | - OS: [e.g. iOS8.1]
31 | - Browser [e.g. stock browser, safari]
32 | - Version [e.g. 22]
33 |
34 | **Additional context**
35 | Add any other context about the problem here.
36 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 |
5 | ---
6 |
7 | **Is your feature request related to a problem? Please describe.**
8 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
9 |
10 | **Describe the solution you'd like**
11 | A clear and concise description of what you want to happen.
12 |
13 | **Describe alternatives you've considered**
14 | A clear and concise description of any alternative solutions or features you've considered.
15 |
16 | **Additional context**
17 | Add any other context or screenshots about the feature request here.
18 |
--------------------------------------------------------------------------------
/.github/stale.yml:
--------------------------------------------------------------------------------
1 | # Configuration for probot-stale - https://github.com/probot/stale
2 |
3 | # General configuration
4 | # Label to use when marking as stale
5 | staleLabel: stale
6 |
7 | # Pull request specific configuration
8 | pulls:
9 | # Number of days of inactivity before an Issue or Pull Request becomes stale
10 | daysUntilStale: 14
11 | # Number of days of inactivity before a stale Issue or Pull Request is closed.
12 | # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
13 | daysUntilClose: 30
14 | # Comment to post when marking as stale. Set to `false` to disable
15 | markComment: >
16 | This pull request has been automatically marked as stale because it has not had
17 | activity in the last 2 weeks. It will be closed in 30 days if no further activity occurs. Please
18 | feel free to give a status update now, ping for review, or re-open when it's ready.
19 | Thank you for your contributions!
20 | # Comment to post when closing a stale Issue or Pull Request.
21 | closeComment: >
22 | This pull request has been automatically closed because it has not had
23 | activity in the last 30 days. Please feel free to give a status update now, ping for review, or re-open when it's ready.
24 | Thank you for your contributions!
25 | # Limit the number of actions per hour, from 1-30. Default is 30
26 | limitPerRun: 1
27 |
28 | # Issue specific configuration
29 | issues:
30 | # TODO: Consider increasing the limitPerRun once we are satisfied with the bot's performance
31 | limitPerRun: 1
32 | daysUntilStale: 30
33 | daysUntilClose: 7
34 | markComment: >
35 | This issue has been automatically marked as stale because it has not had activity in the
36 |     last 30 days. It will be closed in the next 7 days unless it is tagged "help wanted" or other activity
37 | occurs. Thank you for your contributions.
38 | closeComment: >
39 | This issue has been automatically closed because it has not had activity in the
40 |     last 37 days. If this issue is still valid, please ping a maintainer and ask them to label it as "help wanted".
41 | Thank you for your contributions.
42 | exemptLabels:
43 | - feature requested
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | *.tar
3 | *.tar.bz2
4 | *.tgz
5 | *.pyc
6 |
7 | # ===== main =====
8 | hosts
9 | hosts.yml
10 | all.yml
11 | admin.conf
12 | controller.yml
13 | registry.yml
14 | registry-playbook/file/source/
15 | cluster-*
16 | !cluster-template
17 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM busybox:latest
2 |
3 | WORKDIR /workspace
4 |
5 | COPY callback_plugins /workspace/callback_plugins
6 | COPY docker-playbook /workspace/docker-playbook
7 | COPY crio-playbook /workspace/crio-playbook
8 | COPY etcd-playbook /workspace/etcd-playbook
9 | COPY kubernetes-playbook /workspace/kubernetes-playbook
10 | COPY harbor-playbook /workspace/harbor-playbook
11 | COPY loadbalancer-playbook /workspace/loadbalancer-playbook
12 | COPY prometheus-playbook /workspace/prometheus-playbook
13 | COPY istio-playbook /workspace/istio-playbook
14 | COPY elasticcloud-playbook /workspace/elasticcloud-playbook
15 | COPY components_order.conf /workspace
16 |
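17 | # A minimal sketch of building this playbook image, assuming the
18 | # wise2c/playbook:v1.33.0 tag referenced by the compose files in this repository:
19 | #
20 | #   docker build -t wise2c/playbook:v1.33.0 .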
--------------------------------------------------------------------------------
/README-CN.md:
--------------------------------------------------------------------------------
1 | # Breeze
2 | - 可用于生产环境的图形化Kubernetes集群部署工具
3 |
4 | [](https://gitlab.com/alanpeng/breeze/-/commits/master)
5 | [](https://github.com/wise2c-devops/breeze/blob/master/LICENSE)
6 |
7 | [English](./README.md) | [中文](./README-CN.md)
8 |
9 | **敬请留意**: 分支代码可能正处于开发状态而不稳定。请选择 [releases](https://github.com/wise2c-devops/breeze/releases) 用于正式环境的部署。
10 |
11 | 请阅读 **[部署指南](./BreezeManual-CN.md)** 获得更详细的Breeze使用操作指引。
12 |
13 |
14 |
15 | Breeze项目旨在提供一个可信的、安全的、稳定的Kubernetes集群部署工具,它可以帮助您通过图形化操作界面快捷地在生产环境部署一个或多个Kubernetes集群,而不需要连入互联网环境。作为云原生产品安装工具,Breeze被列入[CNCF Cloud Native Interactive Landscape](https://landscape.cncf.io/category=certified-kubernetes-installer&format=card-mode&selected=wise2-c-technology-breeze),其品质值得信赖。
16 |
17 |
18 |
19 | ## 功能
20 | * **运行简单**: Breeze将部署Kubernetes集群所需的全部资源文件打包在一个docker镜像内,这包括Kubernetes的组件镜像、docker、etcd、harbor、kubernetes集群部署的ansible playbook脚本文件等。同时,Breeze部署主机自身也作为一个RHEL/CentOS的yum或Ubuntu的apt仓库服务器角色存在,因此,您只需准备一台安装了docker和docker-compose命令的主机即可轻松地使Breeze运行起来并进行Kubernetes集群的部署。
21 |
22 | * **简化Kubernetes集群部署流程**: 仅需几条简单命令,就能使Breeze程序运行起来,接下来的Kubernetes集群部署工作全都通过图形化操作界面完成。
23 |
24 | * **支持离线部署**: 在5个镜像(playbook、yum-repo/apt-source、pagoda、deploy-ui)被加载到Breeze部署主机之后,所有操作都不再需要访问互联网。Breeze自身作为RHEL/CentOS的yum仓库或Ubuntu的apt仓库为被部署机提供yum/apt源服务,并使用kubeadm进行Kubernetes的部署工作;同时Breeze还会部署一个Harbor服务器用于内网的镜像下载服务。
25 |
26 | * **支持多个集群批量部署**: Breeze支持批量部署多个Kubernetes集群。
27 |
28 | * **支持高可用架构**: 使用Breeze部署的Kubernetes集群,默认提供3个master节点和3个etcd节点, 结合haproxy+keepalived架构服务,所有工作节点都使用虚拟浮动IP地址和主节点服务器通信。
29 |
30 | ## 架构原理图
31 | 
32 |
33 | 
34 |
35 | ## 组件
36 | - **breeze**: 用于部署docker, harbor, haproxy+keepalived, etcd, kubernetes等组件的Ansible playbook。
37 |
38 | - **yum-repo**: 用于RHEL/CentOS安装docker, docker-compose, kubelet, kubectl, kubeadm, kubernetes-cni等的yum仓库源。
39 |
40 | - **apt-source**: 用于Ubuntu安装docker, docker-compose, kubelet, kubectl, kubeadm, kubernetes-cni等的apt仓库源。
41 |
42 | - **deploy-ui**: 图形界面组件。
43 |
44 | - **pagoda**: 提供操作Ansible playbook的API。
45 |
46 | - **kubeadm-version**: 通过"kubeadm config"命令获取Kubernetes组件镜像版本清单。
47 |
48 | ## 安装 & 运行
49 |
50 | **系统要求:**
51 |
52 | **部署机:** docker 1.13.1+ 和 docker-compose 1.12.0+。
53 |
54 | **Kubernetes集群节点:** 兼容RHEL/CentOS/RockyLinux/AlmaLinux/OracleLinux 8.4+、Ubuntu 20/22 LTS 以及 openEuler 22.03 LTS SP3 版本。推荐使用Minimal安装模式;为确保部署顺利进行,应尽可能保证系统干净。
55 |
56 | 请阅读 **[部署指南](./BreezeManual-CN.md)** 获得更详细的Breeze使用操作指引。
57 |
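58 | 下面给出"几条简单命令"的一个最小示例(假设使用本仓库自带的阿里云compose文件,端口映射见该文件):
59 | 
60 | ```bash
61 | # 获取compose文件并启动全部部署端服务
62 | curl -LO https://raw.githubusercontent.com/wise2c-devops/breeze/master/docker-compose-aliyun.yml
63 | docker-compose -f docker-compose-aliyun.yml up -d
64 | # 图形界面映射在部署机的88端口
65 | ```
66 | 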
58 | ## 问题反馈沟通
59 |
60 | * **微信:** 关注微信公众号【Wise2C】后回复【进群】,睿云小助手会第一时间把拉你进入【 K8s企业落地实践群】,关于Breeze部署工具的问题以及建议我们在群里讨论!
61 |
62 | 常见排错说明在此:
63 | https://github.com/wise2c-devops/breeze/blob/master/TroubleShooting-CN.md
64 |
65 | ## 开源许可
66 |
67 | Breeze开源项目遵循 [Apache 2 license](LICENSE) 开源许可协议。
68 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Breeze
2 | - Deploy a Production Ready Kubernetes Cluster with graphical interface
3 |
4 | [](https://gitlab.com/alanpeng/breeze/-/commits/master)
5 | [](https://github.com/wise2c-devops/breeze/blob/master/LICENSE)
6 |
7 | [English](./README.md) | [中文](./README-CN.md)
8 |
9 | **Note**: Branches may be in an *unstable or even broken state* during development.
10 | Please use [releases](https://github.com/wise2c-devops/breeze/releases) instead of those branches in order to get stable binaries.
11 |
12 | Refer to **[User Guide](manual/BreezeManual.pdf)** for more details on how to use Breeze.
13 |
14 |
15 |
16 | Project Breeze is a trusted open source solution that allows you to create Kubernetes clusters on your internal, secure cloud network through a graphical user interface. As a cloud native installer project, Breeze is listed in [CNCF Cloud Native Interactive Landscape](https://landscape.cncf.io/category=certified-kubernetes-installer&format=card-mode&selected=wise2-c-technology-breeze).
17 |
18 |
19 |
20 | ## Features
21 | * **Easy to run**: Breeze bundles all the resources you need to deploy Kubernetes clusters, such as Kubernetes component images and Ansible playbooks, into a single Docker image (wise2c/playbook). It also works as a local RHEL/CentOS yum and Ubuntu apt repository server. You just need a Linux server with docker and docker-compose installed to run Breeze.
22 |
23 | * **Simplified Kubernetes cluster deployment**: With a few simple commands you can get Breeze running, then finish the rest of the deployment through the graphical interface.
24 |
25 | * **Support offline deployment**: After the 5 images (playbook, yum-repo/apt-source, pagoda, deploy-ui) have been loaded on the deploy server, Kubernetes clusters can be set up without internet access. Breeze works as a yum/apt repository server, deploys a local Harbor registry, and uses kubeadm to set up Kubernetes clusters. All Docker images are then pulled from the local Harbor registry.
26 |
27 | * **Support multi-cluster**: Breeze supports deploying multiple Kubernetes clusters.
28 |
29 | * **Support highly available architecture**: With Breeze, you can set up Kubernetes clusters with 3 master servers and 3 etcd servers combined with haproxy and keepalived. All worker nodes use the virtual floating IP address to communicate with the master servers.
30 |
31 | ## Architecture
32 | 
33 |
34 | 
35 |
36 | ## Components
37 | - **breeze**: Ansible playbook for deployments of docker, harbor, haproxy+keepalived, etcd, kubernetes.
38 |
39 | - **yum-repo**: RHEL/CentOS yum repository for docker, docker-compose, kubelet, kubectl, kubeadm, kubernetes-cni, etc.
40 |
41 | - **apt-source**: Ubuntu apt source repository for docker, docker-compose, kubelet, kubectl, kubeadm, kubernetes-cni, etc.
42 |
43 | - **deploy-ui**: Graphical user interface.
44 |
45 | - **pagoda**: Server offering the API to operate Ansible playbooks.
46 |
47 | - **kubeadm-version**: Gets the Kubernetes component image version list with the "kubeadm config" command (see the sketch below).
48 |
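49 | A hedged sketch of that command (the release shown is only an example):
50 | 
51 | ```bash
52 | # List the component images kubeadm would pull for a given Kubernetes release
53 | kubeadm config images list --kubernetes-version v1.28.0
54 | ```
55 | 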
49 | ## Install & Run
50 |
51 | **System requirements:**
52 |
53 | **Deploy server:** docker 1.13.1+ and docker-compose 1.12.0+.
54 |
55 | **Kubernetes cluster server:** RHEL/CentOS/RockyLinux/AlmaLinux/OracleLinux 8.4+, Ubuntu 20/22 LTS, or openEuler 22.03 LTS is required, and minimal installation mode is recommended.
56 |
57 | Refer to **[User Guide](manual/BreezeManual.pdf)** for more details on how to use Breeze.
58 |
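59 | For example, a minimal sketch of bringing Breeze up, assuming the CentOS compose file shipped in this repository:
60 | 
61 | ```bash
62 | # Fetch the compose file and start all deploy-side services
63 | curl -LO https://raw.githubusercontent.com/wise2c-devops/breeze/master/docker-compose-centos.yml
64 | docker-compose -f docker-compose-centos.yml up -d
65 | # The graphical interface is mapped to port 88 of the deploy server (see the compose file)
66 | ```
67 | 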
59 | ## Community
60 |
61 | * **Slack:** Join Breeze's community for discussion and ask questions: [Breeze Slack](https://wise2c-breeze.slack.com/), channel: #general
62 |
63 | ## License
64 |
65 | Breeze is available under the [Apache 2 license](LICENSE).
66 |
--------------------------------------------------------------------------------
/components_order.conf:
--------------------------------------------------------------------------------
1 | crio
2 | harbor
3 | loadbalancer
4 | etcd
5 | kubernetes
6 | prometheus
7 | istio
8 | elasticcloud
9 |
--------------------------------------------------------------------------------
/crio-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/crio-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/crio-playbook/version/file/README.md
--------------------------------------------------------------------------------
/crio-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/crio-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/crio-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/crio-playbook/version/install.ansible:
--------------------------------------------------------------------------------
1 | - name: init host
2 | hosts: hosts
3 | user: root
4 | any_errors_fatal: true
5 | vars:
6 | path: /var/lib/wise2c/tmp/crio
7 | tasks:
8 | - name: check environment
9 | script: scripts/check_environment.sh
10 | register: check_env_output
11 | environment:
12 | BREEZE_LSB_ID: "{{ ansible_facts.distribution }}"
13 | BREEZE_LSB_RELEASE: "{{ ansible_facts.distribution_version }}"
14 | BREEZE_PYTHON_VERSION: "{{ ansible_facts.python_version }}"
15 |
16 | - name: exit
17 | fail:
18 | msg: "{{ check_env_output.stdout }}"
19 | when: check_env_output.stdout != "true"
20 |
21 | - name: get seed ip
22 | shell:
23 | echo $SSH_CONNECTION | cut -d " " -f 1
24 | register: ip
25 |
26 | - name: add seed to /etc/hosts
27 | blockinfile:
28 | path: /etc/hosts
29 | block: '{{ ip.stdout }} {{ wise2c_seed_host }}'
30 | marker: '# {mark} WISE2C DEPLOY MANAGED BLOCK {{ wise2c_seed_host }}'
31 |
32 | - name: add to /etc/hosts
33 | blockinfile:
34 | path: /etc/hosts
35 | block: '{{ item.key }} {{ item.value.hostname }}'
36 | marker: "# {mark} WISE2C DEPLOY MANAGED BLOCK {{ item.key }}"
37 | with_dict: "{{ hostvars }}"
38 |
39 | - name: check crio
40 | script: scripts/check_crio.sh {{ harbor }}
41 | register: check_output
42 |
43 | - name: setup crio on all nodes
44 | include_tasks: crio.ansible
45 | when: check_output.stdout != 'true'
46 |
--------------------------------------------------------------------------------
/crio-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "format_hostname",
4 | "label": "change host name",
5 | "description": "Change all server host name to the name defined in UI",
6 | "type": "bool",
7 | "default": "false",
8 | "required": true
9 | }
10 | ]
11 |
--------------------------------------------------------------------------------
/crio-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean crio
2 | hosts: hosts
3 | user: root
4 | tasks:
5 | - name: remove crio for Redhat/CentOS
6 | yum:
7 | disablerepo: '*'
8 | enablerepo: wise2c-crio
9 | state: absent
10 | name: '{{ item }}'
11 | with_items:
12 | - crio
13 | - podman
14 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS")
15 |
16 | - name: remove crio for Ubuntu
17 | apt:
18 | state: absent
19 | name: '{{ item }}'
20 | with_items:
21 | - cri-o
22 | - podman
23 |       when: ansible_distribution == "Ubuntu"
24 |
--------------------------------------------------------------------------------
/crio-playbook/version/scripts/check_crio.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # Print "true" when this node can skip cri-o setup: either a running docker
3 | # daemon is detected, or cri-o is already configured by Breeze (the harbor
4 | # address passed as $1 is registered as an insecure registry and the
5 | # cgroup_manager is systemd).
6 | 
7 | check_crio_config() {
8 |     if [ -e '/etc/containers/registries.conf' ]; then
9 |         insecure_harbor=`cat /etc/containers/registries.conf |grep $1 |awk -F'=' '{print $2}' |awk -F'"' '{print $2}'`
10 |         if [ "${insecure_harbor}" == "$1" ]; then
11 |             if [ -e '/var/run/crio/crio.sock' ]; then
12 |                 cgroup_manager=`curl -sS --unix-socket /var/run/crio/crio.sock http://localhost/config |grep cgroup_manager |awk -F '"' '{print $2}'`
13 |                 if [ "${cgroup_manager}" == "systemd" ]; then
14 |                     echo -n true
15 |                 else
16 |                     # crio.sock exists but the crio service is not started
17 |                     echo -n false
18 |                 fi
19 |             else
20 |                 # crio.sock does not exist
21 |                 echo -n false
22 |             fi
23 |         else
24 |             # crio was not installed by Breeze
25 |             echo -n false
26 |         fi
27 |     else
28 |         # crio is not installed
29 |         echo -n false
30 |     fi
31 | }
32 | 
33 | if [ -e '/var/run/docker.sock' ]; then
34 |     docker_installed=`curl -sS --unix-socket /var/run/docker.sock http:/v1.24/info | jq -r '.RegistryConfig.IndexConfigs."'docker.io'".Name'`
35 |     if [ "${docker_installed}" == "docker.io" ]; then
36 |         echo -n true
37 |     else
38 |         # docker.sock exists but the docker service is not started
39 |         check_crio_config $1
40 |     fi
41 | else
42 |     # docker is not installed
43 |     check_crio_config $1
44 | fi
45 | 
--------------------------------------------------------------------------------
/crio-playbook/version/scripts/check_environment.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
6 |
7 | : '
8 | function version_le() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" == "$1"; }
9 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" != "$1"; }
10 | function version_ge() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; }
11 |
12 | if version_gt $VERSION $VERSION2; then
13 | echo "$VERSION is greater than $VERSION2"
14 | fi
15 |
16 | if version_le $VERSION $VERSION2; then
17 | echo "$VERSION is less than or equal to $VERSION2"
18 | fi
19 |
20 | if version_lt $VERSION $VERSION2; then
21 | echo "$VERSION is less than $VERSION2"
22 | fi
23 |
24 | if version_ge $VERSION $VERSION2; then
25 | echo "$VERSION is greater than or equal to $VERSION2"
26 | fi
27 | '
28 |
29 | [ ${BREEZE_LSB_ID} ]
30 | [ ${BREEZE_LSB_RELEASE} ]
31 | [ ${BREEZE_PYTHON_VERSION} ]
32 |
33 | if [[ "${BREEZE_LSB_ID}" != "RedHat" ]] && [[ "${BREEZE_LSB_ID}" != "CentOS" ]] && [[ "${BREEZE_LSB_ID}" != "OracleLinux" ]] && [[ "${BREEZE_LSB_ID}" != "Rocky" ]] && [[ "${BREEZE_LSB_ID}" != "AlmaLinux" ]] && [[ "${BREEZE_LSB_ID}" != "Anolis" ]] && [[ "${BREEZE_LSB_ID}" != "Ubuntu" ]]; then
34 | echo "please use RHEL/CentOS/AlmaLinux/RockyLinux/OracleLinux or Ubuntu"
35 | exit
36 | fi
37 |
38 | if version_gt 8.4 ${BREEZE_LSB_RELEASE} && [[ "${BREEZE_LSB_ID}" == "RedHat" ]]; then
39 | echo "please use RHEL 8.x (x>3) for Breeze"
40 | exit
41 | fi
42 |
43 | if version_gt 8.4 ${BREEZE_LSB_RELEASE} && [[ "${BREEZE_LSB_ID}" == "CentOS" ]]; then
44 | echo "please use CentOS 8.x (x>3) for Breeze"
45 | exit
46 | fi
47 |
48 | if version_gt 20 ${BREEZE_LSB_RELEASE} && [[ "${BREEZE_LSB_ID}" == "Ubuntu" ]]; then
49 | echo "please use Ubuntu 20/22 for Breeze"
50 | exit
51 | fi
52 |
53 | if version_gt 2.7 ${BREEZE_PYTHON_VERSION}; then
54 | echo "please use python 2.7+"
55 | exit
56 | fi
57 |
58 | printf true
59 |
--------------------------------------------------------------------------------
/crio-playbook/version/scripts/fix-crio-bug.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | podman image trust set -f /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release registry.access.redhat.com
3 | podman image trust set -f /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release registry.redhat.io
4 | cat << EOF > /etc/containers/registries.d/registry.access.redhat.com.yaml
5 | docker:
6 |   registry.access.redhat.com:
7 |     sigstore: https://access.redhat.com/webassets/docker/content/sigstore
8 | EOF
9 | 
10 | cat << EOF > /etc/containers/registries.d/registry.redhat.io.yaml
11 | docker:
12 |   registry.redhat.io:
13 |     sigstore: https://registry.redhat.io/containers/sigstore
14 | EOF
15 |
--------------------------------------------------------------------------------
/crio-playbook/version/scripts/fix-ubuntu-docker-warning.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sed -i 's#GRUB_CMDLINE_LINUX=""#GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"#g' /etc/default/grub
3 | update-grub
4 | #reboot
5 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/chrony/redhat-centos/chrony-client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use the first host of the play as the NTP server.
2 | server {{ ansible_play_batch[0] }} iburst
--------------------------------------------------------------------------------
/crio-playbook/version/template/chrony/redhat-centos/chrony-server.conf.j2:
--------------------------------------------------------------------------------
1 | # Use public servers from the pool.ntp.org project.
2 | # Please consider joining the pool (https://www.pool.ntp.org/join.html).
3 | #pool 2.pool.ntp.org iburst
4 |
5 | server {{ ansible_play_batch[0] }} iburst
6 |
7 | # Use NTP servers from DHCP.
8 | #sourcedir /run/chrony-dhcp
9 |
10 | # Record the rate at which the system clock gains/losses time.
11 | driftfile /var/lib/chrony/drift
12 |
13 | # Allow the system clock to be stepped in the first three updates
14 | # if its offset is larger than 1 second.
15 | makestep 1.0 3
16 |
17 | # Enable kernel synchronization of the real-time clock (RTC).
18 | rtcsync
19 |
20 | # Enable hardware timestamping on all interfaces that support it.
21 | #hwtimestamp *
22 |
23 | # Increase the minimum number of selectable sources required to adjust
24 | # the system clock.
25 | #minsources 2
26 |
27 | # Allow NTP client access from local network.
28 | #allow 192.168.0.0/16
29 | allow
30 |
31 | # Serve time even if not synchronized to a time source.
32 | local stratum 10
33 |
34 | # Require authentication (nts or key option) for all NTP sources.
35 | #authselectmode require
36 |
37 | # Specify file containing keys for NTP authentication.
38 | keyfile /etc/chrony.keys
39 |
40 | # Save NTS keys and cookies.
41 | #ntsdumpdir /var/lib/chrony
42 |
43 | # Insert/delete leap seconds by slewing instead of stepping.
44 | #leapsecmode slew
45 |
46 | # Get TAI-UTC offset and leap seconds from the system tz database.
47 | leapsectz right/UTC
48 |
49 | # Specify directory for log files.
50 | logdir /var/log/chrony
51 |
52 | # Select which information is logged.
53 | #log measurements statistics tracking
54 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/chrony/ubuntu20/chrony-client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use the first host of the play as the NTP server.
2 | server {{ ansible_play_batch[0] }} iburst
--------------------------------------------------------------------------------
/crio-playbook/version/template/chrony/ubuntu20/chrony-server.conf.j2:
--------------------------------------------------------------------------------
1 | # Welcome to the chrony configuration file. See chrony.conf(5) for more
2 | # information about usuable directives.
3 |
4 | # This will use (up to):
5 | # - 4 sources from ntp.ubuntu.com which some are ipv6 enabled
6 | # - 2 sources from 2.ubuntu.pool.ntp.org which is ipv6 enabled as well
7 | # - 1 source from [01].ubuntu.pool.ntp.org each (ipv4 only atm)
8 | # This means by default, up to 6 dual-stack and up to 2 additional IPv4-only
9 | # sources will be used.
10 | # At the same time it retains some protection against one of the entries being
11 | # down (compare to just using one of the lines). See (LP: #1754358) for the
12 | # discussion.
13 | #
14 | # About using servers from the NTP Pool Project in general see (LP: #104525).
15 | # Approved by Ubuntu Technical Board on 2011-02-08.
16 | # See http://www.pool.ntp.org/join.html for more information.
17 | #pool ntp.ubuntu.com iburst maxsources 4
18 | #pool 0.ubuntu.pool.ntp.org iburst maxsources 1
19 | #pool 1.ubuntu.pool.ntp.org iburst maxsources 1
20 | #pool 2.ubuntu.pool.ntp.org iburst maxsources 2
21 |
22 | server {{ ansible_play_batch[0] }} iburst
23 |
24 | # This directive specify the location of the file containing ID/key pairs for
25 | # NTP authentication.
26 | keyfile /etc/chrony/chrony.keys
27 |
28 | # This directive specify the file into which chronyd will store the rate
29 | # information.
30 | driftfile /var/lib/chrony/chrony.drift
31 |
32 | # Uncomment the following line to turn logging on.
33 | #log tracking measurements statistics
34 |
35 | # Log files location.
36 | logdir /var/log/chrony
37 |
38 | # Stop bad estimates upsetting machine clock.
39 | maxupdateskew 100.0
40 |
41 | # This directive enables kernel synchronisation (every 11 minutes) of the
42 | # real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
43 | rtcsync
44 |
45 | # Step the system clock instead of slewing it if the adjustment is larger than
46 | # one second, but only in the first three clock updates.
47 | makestep 1 3
48 |
49 | # Allow NTP client access from local network.
50 | allow
51 |
52 | # Serve time even if not synchronized to a time source.
53 | local stratum 10
54 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/chrony/ubuntu22/chrony-client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use the first host of the play as the NTP server.
2 | server {{ ansible_play_batch[0] }} iburst
--------------------------------------------------------------------------------
/crio-playbook/version/template/chrony/ubuntu22/chrony-server.conf.j2:
--------------------------------------------------------------------------------
1 | # Welcome to the chrony configuration file. See chrony.conf(5) for more
2 | # information about usable directives.
3 |
4 | # Include configuration files found in /etc/chrony/conf.d.
5 | confdir /etc/chrony/conf.d
6 |
7 | # This will use (up to):
8 | # - 4 sources from ntp.ubuntu.com which some are ipv6 enabled
9 | # - 2 sources from 2.ubuntu.pool.ntp.org which is ipv6 enabled as well
10 | # - 1 source from [01].ubuntu.pool.ntp.org each (ipv4 only atm)
11 | # This means by default, up to 6 dual-stack and up to 2 additional IPv4-only
12 | # sources will be used.
13 | # At the same time it retains some protection against one of the entries being
14 | # down (compare to just using one of the lines). See (LP: #1754358) for the
15 | # discussion.
16 | #
17 | # About using servers from the NTP Pool Project in general see (LP: #104525).
18 | # Approved by Ubuntu Technical Board on 2011-02-08.
19 | # See http://www.pool.ntp.org/join.html for more information.
20 | #pool ntp.ubuntu.com iburst maxsources 4
21 | #pool 0.ubuntu.pool.ntp.org iburst maxsources 1
22 | #pool 1.ubuntu.pool.ntp.org iburst maxsources 1
23 | #pool 2.ubuntu.pool.ntp.org iburst maxsources 2
24 |
25 | server {{ ansible_play_batch[0] }} iburst
26 |
27 | # Use time sources from DHCP.
28 | sourcedir /run/chrony-dhcp
29 |
30 | # Use NTP sources found in /etc/chrony/sources.d.
31 | sourcedir /etc/chrony/sources.d
32 |
33 | # This directive specify the location of the file containing ID/key pairs for
34 | # NTP authentication.
35 | keyfile /etc/chrony/chrony.keys
36 |
37 | # This directive specify the file into which chronyd will store the rate
38 | # information.
39 | driftfile /var/lib/chrony/chrony.drift
40 |
41 | # Save NTS keys and cookies.
42 | ntsdumpdir /var/lib/chrony
43 |
44 | # Uncomment the following line to turn logging on.
45 | #log tracking measurements statistics
46 |
47 | # Log files location.
48 | logdir /var/log/chrony
49 |
50 | # Stop bad estimates upsetting machine clock.
51 | maxupdateskew 100.0
52 |
53 | # This directive enables kernel synchronisation (every 11 minutes) of the
54 | # real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
55 | rtcsync
56 |
57 | # Step the system clock instead of slewing it if the adjustment is larger than
58 | # one second, but only in the first three clock updates.
59 | makestep 1 3
60 |
61 | # Get TAI-UTC offset and leap seconds from the system tz database.
62 | # This directive must be commented out when using time sources serving
63 | # leap-smeared time.
64 | leapsectz right/UTC
65 |
66 | # Allow NTP client access from local network.
67 | allow
68 |
69 | # Serve time even if not synchronized to a time source.
70 | local stratum 10
71 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/ipvs.conf.j2:
--------------------------------------------------------------------------------
1 | # Kernel modules to load at boot: IPVS for kube-proxy, plus overlay and br_netfilter.
2 | ip_vs
3 | ip_vs_rr
4 | ip_vs_wrr
5 | ip_vs_sh
6 | overlay
7 | br_netfilter
8 |
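9 | # A sketch of how a modules-load list like this is typically consumed once
10 | # rendered (assumed target path; the playbook's actual task is not shown here):
11 | #
12 | #   cp ipvs.conf /etc/modules-load.d/ipvs.conf
13 | #   systemctl restart systemd-modules-load.service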
--------------------------------------------------------------------------------
/crio-playbook/version/template/registries.conf.https.j2:
--------------------------------------------------------------------------------
1 | unqualified-search-registries = ["registry.fedoraproject.org", "registry.access.redhat.com", "registry.centos.org", "docker.io"]
2 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/registries.conf.j2:
--------------------------------------------------------------------------------
1 | unqualified-search-registries = ["registry.fedoraproject.org", "registry.access.redhat.com", "registry.centos.org", "docker.io"]
2 | [[registry]]
3 | location="{{ harbor }}"
4 | insecure=true
5 |
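6 | # Example rendered output, assuming the harbor variable resolves to the
7 | # hypothetical address 192.168.0.100:
8 | #
9 | #   unqualified-search-registries = ["registry.fedoraproject.org", "registry.access.redhat.com", "registry.centos.org", "docker.io"]
10 | #   [[registry]]
11 | #   location="192.168.0.100"
12 | #   insecure=true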
--------------------------------------------------------------------------------
/crio-playbook/version/template/wise2c-centos8.repo.j2:
--------------------------------------------------------------------------------
1 | [wise2c-k8s]
2 | name=wise2c-k8s
3 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms/k8s/centos8
4 | enabled=1
5 | gpgcheck=0
6 | module_hotfixes=1
7 |
8 | [wise2c-docker]
9 | name=wise2c-docker
10 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms/docker/centos8
11 | enabled=1
12 | gpgcheck=0
13 | module_hotfixes=1
14 |
15 | [wise2c-ceph]
16 | name=wise2c-ceph
17 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms/ceph/centos8
18 | enabled=1
19 | gpgcheck=0
20 | module_hotfixes=1
21 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/wise2c-centos9.repo.j2:
--------------------------------------------------------------------------------
1 | [wise2c-k8s]
2 | name=wise2c-k8s
3 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms/k8s/centos9
4 | enabled=1
5 | gpgcheck=0
6 | module_hotfixes=1
7 |
8 | [wise2c-docker]
9 | name=wise2c-docker
10 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms/docker/centos9
11 | enabled=1
12 | gpgcheck=0
13 | module_hotfixes=1
14 |
15 | [wise2c-ceph]
16 | name=wise2c-ceph
17 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms/ceph/centos9
18 | enabled=1
19 | gpgcheck=0
20 | module_hotfixes=1
21 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/wise2c.list-ubuntu20.j2:
--------------------------------------------------------------------------------
1 | deb http://{{ wise2c_seed_host }}:2008/debs/ubuntu20 ./
2 |
--------------------------------------------------------------------------------
/crio-playbook/version/template/wise2c.list-ubuntu22.j2:
--------------------------------------------------------------------------------
1 | deb http://{{ wise2c_seed_host }}:2008/debs/ubuntu22 ./
2 |
--------------------------------------------------------------------------------
/crio-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [hosts]
2 | {{ range $v := .AllHosts -}}
3 | {{ $v.IP }} hostname={{ $v.HostName }}
4 | {{ end }}
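5 | {{/* Example rendered inventory, with hypothetical addresses and hostnames:
6 | 
7 | [hosts]
8 | 192.168.0.101 hostname=k8s-master01
9 | 192.168.0.102 hostname=k8s-worker01
10 | */}}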
--------------------------------------------------------------------------------
/crio-playbook/version/yat/hosts.yml.gotmpl:
--------------------------------------------------------------------------------
1 | wise2c_seed_host: wise2c-seed
2 | harbor: {{ .harbor.Inherent.endpoint }}
3 | harbor_https: {{ .harbor.Inherent.https }}
4 | format_hostname: {{ .crio.Property.format_hostname }}
5 |
--------------------------------------------------------------------------------
/docker-compose-aliyun.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | deploy:
4 | container_name: deploy-main
5 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/pagoda:v1.3.1
6 | restart: always
7 | entrypoint: sh
8 | command:
9 | - -c
10 | - "/root/pagoda -logtostderr -v 4 -w /workspace"
11 | ports:
12 | - 88:80
13 | - 8088:8080
14 | volumes:
15 | - $HOME/.ssh:/root/.ssh
16 | - $PWD/deploy:/deploy
17 | volumes_from:
18 | - playbook
19 | ui:
20 | container_name: deploy-ui
21 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/deploy-ui:v1.9.2
22 | restart: always
23 | network_mode: "service:deploy"
24 | playbook:
25 | container_name: deploy-playbook
26 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/playbook:v1.33.0
27 | volumes:
28 | - playbook:/workspace
29 | yum-repo:
30 | container_name: deploy-yumrepo
31 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/yum-repo:v1.33.0
32 | ports:
33 | - 2009:2009
34 | restart: always
35 | apt-source:
36 | container_name: deploy-aptsource
37 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/apt-source:v1.33.0
38 | ports:
39 | - 2008:2008
40 | restart: always
41 | volumes:
42 | playbook:
43 | external: false
44 |
--------------------------------------------------------------------------------
/docker-compose-centos-aliyun.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | deploy:
4 | container_name: deploy-main
5 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/pagoda:v1.3.1
6 | restart: always
7 | entrypoint: sh
8 | command:
9 | - -c
10 | - "/root/pagoda -logtostderr -v 4 -w /workspace"
11 | ports:
12 | - 88:80
13 | - 8088:8080
14 | volumes:
15 | - $HOME/.ssh:/root/.ssh
16 | - $PWD/deploy:/deploy
17 | volumes_from:
18 | - playbook
19 | ui:
20 | container_name: deploy-ui
21 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/deploy-ui:v1.9.2
22 | restart: always
23 | network_mode: "service:deploy"
24 | playbook:
25 | container_name: deploy-playbook
26 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/playbook:v1.33.0
27 | volumes:
28 | - playbook:/workspace
29 | yum-repo:
30 | container_name: deploy-yumrepo
31 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/yum-repo:v1.33.0
32 | ports:
33 | - 2009:2009
34 | restart: always
35 | volumes:
36 | playbook:
37 | external: false
38 |
--------------------------------------------------------------------------------
/docker-compose-centos.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | deploy:
4 | container_name: deploy-main
5 | image: wise2c/pagoda:v1.3.1
6 | restart: always
7 | entrypoint: sh
8 | command:
9 | - -c
10 | - "/root/pagoda -logtostderr -v 4 -w /workspace"
11 | ports:
12 | - 88:80
13 | - 8088:8080
14 | volumes:
15 | - $HOME/.ssh:/root/.ssh
16 | - $PWD/deploy:/deploy
17 | volumes_from:
18 | - playbook
19 | ui:
20 | container_name: deploy-ui
21 | image: wise2c/deploy-ui:v1.9.2
22 | restart: always
23 | network_mode: "service:deploy"
24 | playbook:
25 | container_name: deploy-playbook
26 | image: wise2c/playbook:v1.33.0
27 | volumes:
28 | - playbook:/workspace
29 | yum-repo:
30 | container_name: deploy-yumrepo
31 | image: wise2c/yum-repo:v1.33.0
32 | ports:
33 | - 2009:2009
34 | restart: always
35 | volumes:
36 | playbook:
37 | external: false
38 |
--------------------------------------------------------------------------------
/docker-compose-ubuntu-aliyun.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | deploy:
4 | container_name: deploy-main
5 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/pagoda:v1.3.1
6 | restart: always
7 | entrypoint: sh
8 | command:
9 | - -c
10 | - "/root/pagoda -logtostderr -v 4 -w /workspace"
11 | ports:
12 | - 88:80
13 | - 8088:8080
14 | volumes:
15 | - $HOME/.ssh:/root/.ssh
16 | - $PWD/deploy:/deploy
17 | volumes_from:
18 | - playbook
19 | ui:
20 | container_name: deploy-ui
21 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/deploy-ui:v1.9.2
22 | restart: always
23 | network_mode: "service:deploy"
24 | playbook:
25 | container_name: deploy-playbook
26 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/playbook:v1.33.0
27 | volumes:
28 | - playbook:/workspace
29 | apt-source:
30 | container_name: deploy-aptsource
31 | image: registry.cn-shenzhen.aliyuncs.com/breeze-project/apt-source:v1.33.0
32 | ports:
33 | - 2008:2008
34 | restart: always
35 | volumes:
36 | playbook:
37 | external: false
38 |
--------------------------------------------------------------------------------
/docker-compose-ubuntu.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | deploy:
4 | container_name: deploy-main
5 | image: wise2c/pagoda:v1.3.1
6 | restart: always
7 | entrypoint: sh
8 | command:
9 | - -c
10 | - "/root/pagoda -logtostderr -v 4 -w /workspace"
11 | ports:
12 | - 88:80
13 | - 8088:8080
14 | volumes:
15 | - $HOME/.ssh:/root/.ssh
16 | - $PWD/deploy:/deploy
17 | volumes_from:
18 | - playbook
19 | ui:
20 | container_name: deploy-ui
21 | image: wise2c/deploy-ui:v1.9.2
22 | restart: always
23 | network_mode: "service:deploy"
24 | playbook:
25 | container_name: deploy-playbook
26 | image: wise2c/playbook:v1.33.0
27 | volumes:
28 | - playbook:/workspace
29 | apt-source:
30 | container_name: deploy-aptsource
31 | image: wise2c/apt-source:v1.33.0
32 | ports:
33 | - 2008:2008
34 | restart: always
35 | volumes:
36 | playbook:
37 | external: false
38 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | deploy:
4 | container_name: deploy-main
5 | image: wise2c/pagoda:v1.3.1
6 | restart: always
7 | entrypoint: sh
8 | command:
9 | - -c
10 | - "/root/pagoda -logtostderr -v 4 -w /workspace"
11 | ports:
12 | - 88:80
13 | - 8088:8080
14 | volumes:
15 | - $HOME/.ssh:/root/.ssh
16 | - $PWD/deploy:/deploy
17 | volumes_from:
18 | - playbook
19 | ui:
20 | container_name: deploy-ui
21 | image: wise2c/deploy-ui:v1.9.2
22 | restart: always
23 | network_mode: "service:deploy"
24 | playbook:
25 | container_name: deploy-playbook
26 | image: wise2c/playbook:v1.33.0
27 | volumes:
28 | - playbook:/workspace
29 | yum-repo:
30 | container_name: deploy-yumrepo
31 | image: wise2c/yum-repo:v1.33.0
32 | ports:
33 | - 2009:2009
34 | restart: always
35 | apt-source:
36 | container_name: deploy-aptsource
37 | image: wise2c/apt-source:v1.33.0
38 | ports:
39 | - 2008:2008
40 | restart: always
41 | volumes:
42 | playbook:
43 | external: false
44 |
--------------------------------------------------------------------------------
/docker-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/docker-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/docker-playbook/version/file/README.md
--------------------------------------------------------------------------------
/docker-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/docker-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/docker-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docker-playbook/version/install.ansible:
--------------------------------------------------------------------------------
1 | - name: init host
2 | hosts: hosts
3 | user: root
4 | any_errors_fatal: true
5 | vars:
6 | path: /var/lib/wise2c/tmp/docker
7 | tasks:
8 | - name: check environment
9 | script: scripts/check_environment.sh
10 | register: check_env_output
11 | environment:
12 | BREEZE_LSB_ID: "{{ ansible_facts.distribution }}"
13 | BREEZE_LSB_RELEASE: "{{ ansible_facts.distribution_version }}"
14 | BREEZE_PYTHON_VERSION: "{{ ansible_facts.python_version }}"
15 |
16 | - name: exit
17 | fail:
18 | msg: "{{ check_env_output.stdout }}"
19 | when: check_env_output.stdout != "true"
20 |
21 | - name: get seed ip
22 | shell:
23 | echo $SSH_CONNECTION | cut -d " " -f 1
24 | register: ip
25 |
26 | - name: add seed to /etc/hosts
27 | blockinfile:
28 | path: /etc/hosts
29 | block: '{{ ip.stdout }} {{ wise2c_seed_host }}'
30 | marker: '# {mark} WISE2C DEPLOY MANAGED BLOCK {{ wise2c_seed_host }}'
31 |
32 | - name: add to /etc/hosts
33 | blockinfile:
34 | path: /etc/hosts
35 | block: '{{ item.key }} {{ item.value.hostname }}'
36 | marker: "# {mark} WISE2C DEPLOY MANAGED BLOCK {{ item.key }}"
37 | with_dict: "{{ hostvars }}"
38 |
39 | - name: check docker
40 | script: scripts/check_docker.sh {{ harbor }}
41 | register: check_output
42 |
43 | - name: setup docker on all nodes
44 | include_tasks: docker.ansible
45 | when: check_output.stdout != 'true'
46 |
--------------------------------------------------------------------------------
/docker-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "format_hostname",
4 | "label": "change host name",
5 | "description": "Change all server host name to the name defined in UI",
6 | "type": "bool",
7 | "default": "false",
8 | "required": true
9 | }
10 | ]
11 |
--------------------------------------------------------------------------------
/docker-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean docker
2 | hosts: hosts
3 | user: root
4 | tasks:
5 | - name: remove docker for Redhat/CentOS
6 | yum:
7 | disablerepo: '*'
8 | enablerepo: wise2c
9 | state: absent
10 | name: '{{ item }}'
11 | with_items:
12 | - docker-ce
13 | - docker-compose
14 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS")
15 |
16 | - name: remove docker for Ubuntu
17 | apt:
18 | state: absent
19 | name: '{{ item }}'
20 | with_items:
21 | - docker-ce
22 | - docker-compose
23 |       when: ansible_distribution == "Ubuntu"
24 |
25 |
--------------------------------------------------------------------------------
/docker-playbook/version/scripts/check_docker.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # Print "true" when docker is already configured for Breeze: either the harbor
3 | # address passed as $1 is registered as an insecure registry, or the daemon
4 | # already runs with the overlay2 storage driver and the systemd cgroup driver.
5 | insecure_harbor=`curl -sS --unix-socket /var/run/docker.sock http:/v1.24/info | jq -r '.RegistryConfig.IndexConfigs."'$1'".Name'`
6 | if [ "${insecure_harbor}" == "$1" ]; then
7 |     echo -n true
8 | else
9 |     driver=`curl -sS --unix-socket /var/run/docker.sock http:/v1.24/info | jq -r .Driver`
10 |     cgroupdriver=`curl -sS --unix-socket /var/run/docker.sock http:/v1.24/info | jq -r .CgroupDriver`
11 |     if [ "${driver}" == 'overlay2' ] && [ "${cgroupdriver}" == 'systemd' ]; then
12 |         echo -n true
13 |     else
14 |         echo -n false
15 |     fi
16 | fi
17 | 
--------------------------------------------------------------------------------
/docker-playbook/version/scripts/check_environment.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
6 |
7 | : '
8 | function version_le() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" == "$1"; }
9 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" != "$1"; }
10 | function version_ge() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; }
11 |
12 | if version_gt $VERSION $VERSION2; then
13 | echo "$VERSION is greater than $VERSION2"
14 | fi
15 |
16 | if version_le $VERSION $VERSION2; then
17 | echo "$VERSION is less than or equal to $VERSION2"
18 | fi
19 |
20 | if version_lt $VERSION $VERSION2; then
21 | echo "$VERSION is less than $VERSION2"
22 | fi
23 |
24 | if version_ge $VERSION $VERSION2; then
25 | echo "$VERSION is greater than or equal to $VERSION2"
26 | fi
27 | '
28 |
29 | [ ${BREEZE_LSB_ID} ]
30 | [ ${BREEZE_LSB_RELEASE} ]
31 | [ ${BREEZE_PYTHON_VERSION} ]
32 |
33 | if [[ "${BREEZE_LSB_ID}" != "RedHat" ]] && [[ "${BREEZE_LSB_ID}" != "CentOS" ]] && [[ "${BREEZE_LSB_ID}" != "Ubuntu" ]]; then
34 | echo "please use RHEL or CentOS or Ubuntu"
35 | exit
36 | fi
37 |
38 | if version_gt 8.4 ${BREEZE_LSB_RELEASE} && [[ "${BREEZE_LSB_ID}" == "RedHat" ]]; then
39 | echo "please use RHEL 8.x (x>3) for Breeze"
40 | exit
41 | fi
42 |
43 | if version_gt 8.4 ${BREEZE_LSB_RELEASE} && [[ "${BREEZE_LSB_ID}" == "CentOS" ]]; then
44 | echo "please use CentOS 8.x (x>3) for Breeze"
45 | exit
46 | fi
47 |
48 | if version_gt 20 ${BREEZE_LSB_RELEASE} && [[ "${BREEZE_LSB_ID}" == "Ubuntu" ]]; then
49 | echo "please use Ubuntu 20/22 for Breeze"
50 | exit
51 | fi
52 |
53 | if version_gt 2.7 ${BREEZE_PYTHON_VERSION}; then
54 | echo "please use python 2.7+"
55 | exit
56 | fi
57 |
58 | printf true
59 |
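60 | # How version_gt works (reference note): 'sort -V' orders its two arguments by
61 | # version and 'head -n 1' keeps the smaller one; if the smaller value is not
62 | # $1, then $1 must be the greater version. Illustrative checks:
63 | #   version_gt 8.10 8.4 && echo greater   # prints "greater" (8.10 > 8.4)
64 | #   version_gt 8.4 8.10 && echo greater   # prints nothing   (8.4 < 8.10)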
--------------------------------------------------------------------------------
/docker-playbook/version/scripts/fix-ubuntu-docker-warning.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sed -i 's#GRUB_CMDLINE_LINUX=""#GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"#g' /etc/default/grub
3 | update-grub
4 | #reboot
5 |
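6 | # Verification sketch (assumes the stock /etc/default/grub shipped with an
7 | # empty GRUB_CMDLINE_LINUX=""):
8 | #   grep GRUB_CMDLINE_LINUX /etc/default/grub
9 | # The added cgroup_enable=memory swapaccount=1 parameters only take effect
10 | # after the next reboot, which is why the reboot above is left to the operator.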
--------------------------------------------------------------------------------
/docker-playbook/version/template/chrony/redhat-centos/chrony-client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use servers.
2 | server {{ ansible_play_batch[0] }} iburst
--------------------------------------------------------------------------------
/docker-playbook/version/template/chrony/redhat-centos/chrony-server.conf.j2:
--------------------------------------------------------------------------------
1 | # Use servers.
2 | server {{ ansible_play_batch[0] }} iburst
3 |
4 | # Record the rate at which the system clock gains/loses time.
5 | driftfile /var/lib/chrony/drift
6 |
7 | # Allow the system clock to be stepped in the first three updates
8 | # if its offset is larger than 1 second.
9 | makestep 1.0 3
10 |
11 | # Enable kernel synchronization of the real-time clock (RTC).
12 | rtcsync
13 |
14 | # Allow NTP client access from local network.
15 | allow
16 |
17 | # Serve time even if not synchronized to a time source.
18 | local stratum 10
19 |
20 | # Specify directory for log files.
21 | logdir /var/log/chrony
--------------------------------------------------------------------------------
/docker-playbook/version/template/chrony/ubuntu16/chrony-client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use servers.
2 | server {{ ansible_play_batch[0] }} iburst
--------------------------------------------------------------------------------
/docker-playbook/version/template/chrony/ubuntu16/chrony-server.conf.j2:
--------------------------------------------------------------------------------
1 | # This is the default chrony.conf file for the Debian chrony package. After
2 | # editing this file use the command 'invoke-rc.d chrony restart' to make
3 | # your changes take effect. John Hasler 1998-2008
4 |
5 | # See www.pool.ntp.org for an explanation of these servers. Please
6 | # consider joining the project if possible. If you can't or don't want to
7 | # use these servers I suggest that you try your ISP's nameservers. We mark
8 | # the servers 'offline' so that chronyd won't try to connect when the link
9 | # is down. Scripts in /etc/ppp/ip-up.d and /etc/ppp/ip-down.d use chronyc
10 | # commands to switch it on when a dialup link comes up and off when it goes
11 | # down. Code in /etc/init.d/chrony attempts to determine whether or not
12 | # the link is up at boot time and set the online status accordingly. If
13 | # you have an always-on connection such as cable omit the 'offline'
14 | # directive and chronyd will default to online.
15 | #
16 | # Note that if Chrony tries to go "online" and dns lookup of the servers
17 | # fails they will be discarded. Thus under some circumstances it is
18 | # better to use IP numbers than host names.
19 |
20 | pool {{ ansible_play_batch[0] }} offline iburst
21 |
22 | # Look here for the admin password needed for chronyc. The initial
23 | # password is generated by a random process at install time. You may
24 | # change it if you wish.
25 |
26 | keyfile /etc/chrony/chrony.keys
27 |
28 | # This directive sets the key ID used for authenticating user commands via the
29 | # 'chronyc' program at run time.
30 |
31 | commandkey 1
32 |
33 | # I moved the driftfile to /var/lib/chrony to comply with the Debian
34 | # filesystem standard.
35 |
36 | driftfile /var/lib/chrony/chrony.drift
37 |
38 | # Comment this line out to turn off logging.
39 |
40 | log tracking measurements statistics
41 | logdir /var/log/chrony
42 |
43 | # Stop bad estimates upsetting machine clock.
44 |
45 | maxupdateskew 100.0
46 |
47 | # Dump measurements when daemon exits.
48 |
49 | dumponexit
50 |
51 | # Specify directory for dumping measurements.
52 |
53 | dumpdir /var/lib/chrony
54 |
55 | # This directive lets 'chronyd' serve time even if it is unsynchronised to
56 | # any NTP server.
57 |
58 | #local stratum 10
59 |
60 | # This directive designates subnets (or nodes) from which NTP clients are
61 | # allowed to access 'chronyd'.
62 |
63 | #allow foo.example.net
64 | #allow 10/8
65 | #allow 0/0 (allow access by any IPv4 node)
66 | #allow ::/0 (allow access by any IPv6 node)
67 |
68 | # This directive forces `chronyd' to send a message to syslog if it
69 | # makes a system clock adjustment larger than a threshold value in seconds.
70 |
71 | logchange 0.5
72 |
73 | # This directive defines an email address to which mail should be sent
74 | # if chronyd applies a correction exceeding a particular threshold to the
75 | # system clock.
76 |
77 | # mailonchange root@localhost 0.5
78 |
79 | # This directive tells 'chronyd' to parse the 'adjtime' file to find out if the
80 | # real-time clock keeps local time or UTC. It overrides the 'rtconutc' directive.
81 |
82 | hwclockfile /etc/adjtime
83 |
84 | # This directive enables kernel synchronisation (every 11 minutes) of the
85 | # real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
86 |
87 | rtcsync
88 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/chrony/ubuntu18/chrony-client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use servers.
2 | server {{ ansible_play_batch[0] }} iburst
--------------------------------------------------------------------------------
/docker-playbook/version/template/chrony/ubuntu18/chrony-server.conf.j2:
--------------------------------------------------------------------------------
1 | # Use servers.
2 | server {{ ansible_play_batch[0] }} iburst
3 |
4 | # Record the rate at which the system clock gains/loses time.
5 | driftfile /var/lib/chrony/drift
6 |
7 | # Allow the system clock to be stepped in the first three updates
8 | # if its offset is larger than 1 second.
9 | makestep 1.0 3
10 |
11 | # Enable kernel synchronization of the real-time clock (RTC).
12 | rtcsync
13 |
14 | # Allow NTP client access from local network.
15 | allow
16 |
17 | # Serve time even if not synchronized to a time source.
18 | local stratum 10
19 |
20 | # Specify directory for log files.
21 | logdir /var/log/chrony
22 |
23 |
24 |
25 | # Welcome to the chrony configuration file. See chrony.conf(5) for more
26 | # information about usable directives.
27 |
28 | # This will use (up to):
29 | # - 4 sources from ntp.ubuntu.com, some of which are ipv6 enabled
30 | # - 2 sources from 2.ubuntu.pool.ntp.org which is ipv6 enabled as well
31 | # - 1 source from [01].ubuntu.pool.ntp.org each (ipv4 only atm)
32 | # This means by default, up to 6 dual-stack and up to 2 additional IPv4-only
33 | # sources will be used.
34 | # At the same time it retains some protection against one of the entries being
35 | # down (compare to just using one of the lines). See (LP: #1754358) for the
36 | # discussion.
37 | #
38 | # About using servers from the NTP Pool Project in general see (LP: #104525).
39 | # Approved by Ubuntu Technical Board on 2011-02-08.
40 | # See http://www.pool.ntp.org/join.html for more information.
41 | #pool ntp.ubuntu.com iburst maxsources 4
42 | #pool 0.ubuntu.pool.ntp.org iburst maxsources 1
43 | #pool 1.ubuntu.pool.ntp.org iburst maxsources 1
44 | #pool 2.ubuntu.pool.ntp.org iburst maxsources 2
45 | pool {{ ansible_play_batch[0] }} iburst maxsources 1
46 |
47 | #the iburst option is used to speed up the initial synchronisation.
48 | #the maxsources option sets the maximum number of NTP sources.
49 |
50 | # This directive specifies the location of the file containing ID/key pairs for
51 | # NTP authentication.
52 | keyfile /etc/chrony/chrony.keys
53 |
54 | # This directive specifies the file into which chronyd will store the rate
55 | # information.
56 | driftfile /var/lib/chrony/chrony.drift
57 |
58 | # Uncomment the following line to turn logging on.
59 | #log tracking measurements statistics
60 |
61 | # Log files location.
62 | logdir /var/log/chrony
63 |
64 | # Stop bad estimates upsetting machine clock.
65 | maxupdateskew 100.0
66 |
67 | # This directive enables kernel synchronisation (every 11 minutes) of the
68 | # real-time clock. Note that it can't be used along with the 'rtcfile' directive.
69 | rtcsync
70 |
71 | # Step the system clock instead of slewing it if the adjustment is larger than
72 | # one second, but only in the first three clock updates.
73 | makestep 1 3
74 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/daemon.json.https.j2:
--------------------------------------------------------------------------------
1 | {
2 | "exec-opts": [
3 | "native.cgroupdriver=systemd"
4 | ],
5 | "log-driver": "json-file",
6 | "log-opts": {
7 | "max-size": "100m",
8 | "max-file": "5"
9 | },
10 | "storage-driver": "overlay2"
11 | }
12 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/daemon.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "exec-opts": [
3 | "native.cgroupdriver=systemd"
4 | ],
5 | "log-driver": "json-file",
6 | "log-opts": {
7 | "max-size": "100m",
8 | "max-file": "5"
9 | },
10 | "insecure-registries": [
11 | "{{ harbor }}"
12 | ],
13 | "storage-driver": "overlay2"
14 | }
15 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/ipvs.conf.j2:
--------------------------------------------------------------------------------
1 | # Load IPVS at boot
2 | ip_vs
3 | ip_vs_rr
4 | ip_vs_wrr
5 | ip_vs_sh
6 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/wise2c.list-ubuntu20.j2:
--------------------------------------------------------------------------------
1 | deb http://{{ wise2c_seed_host }}:2008/debs/ubuntu20 ./
2 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/wise2c.list-ubuntu22.j2:
--------------------------------------------------------------------------------
1 | deb http://{{ wise2c_seed_host }}:2008/debs/ubuntu22 ./
2 |
--------------------------------------------------------------------------------
/docker-playbook/version/template/wise2c.repo.j2:
--------------------------------------------------------------------------------
1 | [wise2c]
2 | name=wise2c
3 | baseurl=http://{{ wise2c_seed_host }}:2009/rpms
4 | enabled=1
5 | gpgcheck=0
6 |
--------------------------------------------------------------------------------
/docker-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [hosts]
2 | {{ range $v := .AllHosts -}}
3 | {{ $v.IP }} hostname={{ $v.HostName }}
4 | {{ end }}
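5 | {{/* Rendered example (illustrative, two hosts):
6 |    [hosts]
7 |    192.168.0.10 hostname=node-1
8 |    192.168.0.11 hostname=node-2
9 | */}}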
--------------------------------------------------------------------------------
/docker-playbook/version/yat/hosts.yml.gotmpl:
--------------------------------------------------------------------------------
1 | wise2c_seed_host: wise2c-seed
2 | harbor: {{ .harbor.Inherent.endpoint }}
3 | harbor_https: {{ .harbor.Inherent.https }}
4 | format_hostname: {{ .docker.Property.format_hostname }}
5 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/elasticcloud-playbook/version/file/README.md
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/file/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | cd /var/lib/wise2c/tmp/elasticcloud
4 |
5 | # Elastic Operator deploy
6 | kubectl create -f ./crds.yml
7 | kubectl create -f ./eck.yml
8 |
9 | # Wait for CRDs to be ready.
10 | printf "Waiting for ElasticCloud Operator to register custom resource definitions..."
11 |
12 | crd_apmservers_status="false"
13 | until [ "$crd_apmservers_status" = "True" ]; do sleep 1; printf "."; crd_apmservers_status=`kubectl get customresourcedefinitions apmservers.apm.k8s.elastic.co -o jsonpath='{.status.conditions[1].status}' 2>&1`; done
14 |
15 | crd_elasticsearches_status="false"
16 | until [ "$crd_elasticsearches_status" = "True" ]; do sleep 1; printf "."; crd_elasticsearches_status=`kubectl get customresourcedefinitions elasticsearches.elasticsearch.k8s.elastic.co -o jsonpath='{.status.conditions[1].status}' 2>&1`; done
17 |
18 | crd_kibanas_status="false"
19 | until [ "$crd_kibanas_status" = "True" ]; do sleep 1; printf "."; crd_kibanas_status=`kubectl get customresourcedefinitions kibanas.kibana.k8s.elastic.co -o jsonpath='{.status.conditions[1].status}' 2>&1`; done
20 |
21 | until kubectl get apmservers.apm.k8s.elastic.co > /dev/null 2>&1; do sleep 1; printf "."; done
22 | until kubectl get elasticsearches.elasticsearch.k8s.elastic.co > /dev/null 2>&1; do sleep 1; printf "."; done
23 | until kubectl get kibanas.kibana.k8s.elastic.co > /dev/null 2>&1; do sleep 1; printf "."; done
24 |
25 | echo 'Elastic Cloud CRD is ready!'
26 |
27 | kubectl apply -f elasticsearch.yml
28 | kubectl apply -f kibana.yml
29 | kubectl apply -f filebeat.yml
30 | kubectl apply -f elasticsearch-service.yml
31 | kubectl apply -f kibana-service.yml
32 |
33 | echo 'Elastic Cloud has been deployed.'
34 |
35 | # Deploy Fluentd
36 | set +e
37 | estatus="false"
38 | until [ "$estatus" = "Secret" ]; do sleep 1; printf "."; estatus=`kubectl get secret quickstart-es-elastic-user -o jsonpath='{.kind}'`; done
39 | PASSWORD=$(kubectl get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode)
40 | sed -i "s,changeme,${PASSWORD},g" fluentd.yml
41 | #kubectl apply -f fluentd.yml
42 | # https://github.com/fluent/fluent-plugin-parser-cri
43 | # cri log parser is not implemented
44 |
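45 | # Note: the loops above poll .status.conditions[1].status, which assumes the
46 | # "Established" condition is the second entry in the CRD status. A sketch of an
47 | # order-independent alternative using kubectl's built-in wait:
48 | #   kubectl wait --for=condition=Established --timeout=120s \
49 | #     crd/elasticsearches.elasticsearch.k8s.elastic.co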
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/file/remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/filebeat.yml
3 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/kibana-service.yml
4 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/elasticsearch-service.yml
5 | kubectl delete kibana quickstart
6 | kubectl delete elasticsearch quickstart
7 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/kibana.yml
8 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/elasticsearch.yml
9 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/eck.yml
10 | kubectl delete -f /var/lib/wise2c/tmp/elasticcloud/crds.yml
11 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/elasticcloud-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/group_vars/elasticcloud.yml:
--------------------------------------------------------------------------------
1 | cpath: /var/lib/wise2c/tmp/elasticcloud
2 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | path=`dirname $0`
6 |
7 | ElasticCloudVersion=`cat ${path}/components-version.txt |grep "ElasticCloud" |awk '{print $3}'`
8 | ElasticStackVersion=`cat ${path}/components-version.txt |grep "ElasticStack" |awk '{print $3}'`
9 |
10 | echo "" >> ${path}/group_vars/elasticcloud.yml
11 | echo "elastic_cloud_version: ${ElasticCloudVersion}" >> ${path}/group_vars/elasticcloud.yml
12 | echo "elastic_stack_version: ${ElasticStackVersion}" >> ${path}/group_vars/elasticcloud.yml
13 |
14 | curl -L -o ${path}/template/crds.yml.j2 https://download.elastic.co/downloads/eck/${ElasticCloudVersion}/crds.yaml
15 | curl -L -o ${path}/template/eck.yml.j2 https://download.elastic.co/downloads/eck/${ElasticCloudVersion}/operator.yaml
16 |
17 | cat ${path}/template/eck.yml.j2 |grep 'image: "docker.elastic.co/eck/' |awk -F":" '{print $2":"$3}' |awk -F'"' '{print $2}' > images-list.txt
18 | echo "docker.elastic.co/elasticsearch/elasticsearch:${ElasticStackVersion}" >> images-list.txt
19 | echo "docker.elastic.co/kibana/kibana:${ElasticStackVersion}" >> images-list.txt
20 | echo "docker.elastic.co/beats/filebeat:${ElasticStackVersion}" >> images-list.txt
21 | #echo "fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch" >> images-list.txt
22 |
23 | echo 'Images list for Elastic Cloud:'
24 | cat images-list.txt
25 |
26 | for file in $(cat images-list.txt); do docker pull $file; done
27 | echo 'Images pulled.'
28 |
29 | docker save $(cat images-list.txt) -o ${path}/file/elastic-cloud-images.tar
30 | echo 'Images saved.'
31 | bzip2 -z --best ${path}/file/elastic-cloud-images.tar
32 | echo 'Images are compressed in bzip2 format.'
33 |
34 | sed -i "s,docker.elastic.co/eck,{{ registry_endpoint }}/{{ registry_project }},g" ${path}/template/eck.yml.j2
35 |
36 | curl -L -o ${path}/template/fluentd.yml.j2 https://raw.githubusercontent.com/fluent/fluentd-kubernetes-daemonset/master/fluentd-daemonset-elasticsearch-rbac.yaml
37 | sed -i "s,fluent/fluentd-kubernetes-daemonset,{{ registry_endpoint }}/{{ registry_project }}/fluentd-kubernetes-daemonset,g" ${path}/template/fluentd.yml.j2
38 | sed -i "s,elasticsearch-logging,quickstart-es-http.default.svc.cluster.local,g" ${path}/template/fluentd.yml.j2
39 |
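40 | # Resulting images-list.txt holds one image reference per line, e.g. (partial,
41 | # illustrative for ElasticStackVersion=9.0.0):
42 | #   docker.elastic.co/elasticsearch/elasticsearch:9.0.0
43 | #   docker.elastic.co/kibana/kibana:9.0.0
44 | #   docker.elastic.co/beats/filebeat:9.0.0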
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "self",
4 | "label": "Elastic Cloud on Kubernetes (ECK) installation node",
5 | "description": "please select a kubernetes worker node for installation",
6 | "type": "host",
7 | "default": "",
8 | "required": true
9 | },
10 | {
11 | "variable": "ElasticsearchNodePort",
12 | "label": "NodePort for Elasticsearch",
13 | "description": "NodePort for Elasticsearch service, e.g.: 30920",
14 | "type": "int",
15 | "default": "30920",
16 | "required": true
17 | },
18 | {
19 | "variable": "KibanaNodePort",
20 | "label": "NodePort for Kibana",
21 | "description": "NodePort for Kibana service, e.g.: 30561",
22 | "type": "int",
23 | "default": "30561",
24 | "required": true
25 | }
26 | ]
27 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean elastic cloud operator
2 | hosts: all
3 | user: root
4 | tasks:
5 | - name: copy k8s admin.conf for elastic cloud installation
6 | copy:
7 | src: '{{ item.src }}'
8 | dest: '{{ item.dest }}'
9 | with_items:
10 | - { src: '../../kubernetes-playbook/{{ kubernetes_version }}/file/admin.conf', dest: '{{ ansible_env.HOME }}/.kube/config' }
11 |
12 | - name: setup kubectl certification
13 | shell: |
14 | sed -i "s/.*server:.*/ server: https:\/\/{{ kubernetes_endpoint }}/g" $HOME/.kube/config
15 | chown $(id -u):$(id -g) $HOME/.kube/config
16 |
17 | - name: stop & rm elastic cloud service
18 | shell: ./remove.sh
19 | args:
20 | chdir: '{{ cpath }}'
21 | ignore_errors: true
22 |
23 | - name: remove kubectl cert
24 | file:
25 | path: '{{ item }}'
26 | state: absent
27 | with_items:
28 | - '{{ ansible_env.HOME }}/.kube/config'
29 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/template/elasticsearch-service.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | common.k8s.elastic.co/type: elasticsearch
6 | elasticsearch.k8s.elastic.co/cluster-name: quickstart
7 | name: quickstart-es-http
8 | namespace: default
9 | spec:
10 | externalTrafficPolicy: Cluster
11 | ports:
12 | - name: https
13 | nodePort: {{ elasticsearch_nodeport }}
14 | port: 9200
15 | protocol: TCP
16 | targetPort: 9200
17 | selector:
18 | common.k8s.elastic.co/type: elasticsearch
19 | elasticsearch.k8s.elastic.co/cluster-name: quickstart
20 | sessionAffinity: None
21 | type: NodePort
22 | status:
23 | loadBalancer: {}
24 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/template/elasticsearch.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: elasticsearch.k8s.elastic.co/v1
2 | kind: Elasticsearch
3 | metadata:
4 | name: quickstart
5 | spec:
6 | image: {{ registry_endpoint }}/{{ registry_project }}/elasticsearch:{{ elastic_stack_version }}
7 | version: {{ elastic_stack_version }}
8 | http:
9 | tls:
10 | selfSignedCertificate:
11 | disabled: true
12 | nodeSets:
13 | - name: default
14 | count: 1
15 | config:
16 | node.store.allow_mmap: false
17 | podTemplate:
18 | spec:
19 | containers:
20 | - name: elasticsearch
21 | securityContext:
22 | capabilities:
23 | add:
24 | - SYS_CHROOT
25 | volumes:
26 | - name: elasticsearch-data
27 | emptyDir: {}
28 |
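29 | # Note: node.store.allow_mmap: false above lets Elasticsearch run without the
30 | # kernel's vm.max_map_count being raised on the hosts (per the ECK docs), and
31 | # the emptyDir data volume means storage is ephemeral, suitable for evaluation.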
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/template/filebeat.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: beat.k8s.elastic.co/v1beta1
2 | kind: Beat
3 | metadata:
4 | name: quickstart
5 | spec:
6 | type: filebeat
7 | version: {{ elastic_stack_version }}
8 | elasticsearchRef:
9 | name: quickstart
10 | config:
11 | filebeat.inputs:
12 | - type: container
13 | paths:
14 | - /var/log/containers/*.log
15 | daemonSet:
16 | podTemplate:
17 | spec:
18 | dnsPolicy: ClusterFirstWithHostNet
19 | hostNetwork: true
20 | securityContext:
21 | runAsUser: 0
22 | tolerations:
23 | - key: node-role.kubernetes.io/master
24 | effect: NoSchedule
25 | containers:
26 | - name: filebeat
27 | image: {{ registry_endpoint }}/{{ registry_project }}/filebeat:{{ elastic_stack_version }}
28 | volumeMounts:
29 | - name: varlogcontainers
30 | mountPath: /var/log/containers
31 | - name: varlogpods
32 | mountPath: /var/log/pods
33 | - name: varlibdockercontainers
34 | mountPath: /var/lib/docker/containers
35 | volumes:
36 | - name: varlogcontainers
37 | hostPath:
38 | path: /var/log/containers
39 | - name: varlogpods
40 | hostPath:
41 | path: /var/log/pods
42 | - name: varlibdockercontainers
43 | hostPath:
44 | path: /var/lib/docker/containers
45 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/template/fluentd.yml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: fluentd
6 | namespace: kube-system
7 |
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRole
11 | metadata:
12 | name: fluentd
13 | namespace: kube-system
14 | rules:
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - pods
19 | - namespaces
20 | verbs:
21 | - get
22 | - list
23 | - watch
24 |
25 | ---
26 | kind: ClusterRoleBinding
27 | apiVersion: rbac.authorization.k8s.io/v1
28 | metadata:
29 | name: fluentd
30 | roleRef:
31 | kind: ClusterRole
32 | name: fluentd
33 | apiGroup: rbac.authorization.k8s.io
34 | subjects:
35 | - kind: ServiceAccount
36 | name: fluentd
37 | namespace: kube-system
38 | ---
39 | apiVersion: apps/v1
40 | kind: DaemonSet
41 | metadata:
42 | name: fluentd
43 | namespace: kube-system
44 | labels:
45 | k8s-app: fluentd-logging
46 | version: v1
47 | spec:
48 | selector:
49 | matchLabels:
50 | k8s-app: fluentd-logging
51 | version: v1
52 | template:
53 | metadata:
54 | labels:
55 | k8s-app: fluentd-logging
56 | version: v1
57 | spec:
58 | serviceAccount: fluentd
59 | serviceAccountName: fluentd
60 | tolerations:
61 | - key: node-role.kubernetes.io/master
62 | effect: NoSchedule
63 | containers:
64 | - name: fluentd
65 | #image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
66 | image: {{ registry_endpoint }}/{{ registry_project }}/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
67 | env:
68 | - name: FLUENT_ELASTICSEARCH_HOST
69 | value: "quickstart-es-http.default.svc.cluster.local"
70 | - name: FLUENT_ELASTICSEARCH_PORT
71 | value: "9200"
72 | - name: FLUENT_ELASTICSEARCH_SCHEME
73 | value: "http"
74 | # X-Pack Authentication
75 | # =====================
76 | - name: FLUENT_ELASTICSEARCH_USER
77 | value: "elastic"
78 | - name: FLUENT_ELASTICSEARCH_PASSWORD
79 | value: "elastic_user_password"
80 | resources:
81 | limits:
82 | memory: 200Mi
83 | requests:
84 | cpu: 100m
85 | memory: 200Mi
86 | volumeMounts:
87 | - name: varlog
88 | mountPath: /var/log
89 | - name: varlibdockercontainers
90 | mountPath: /var/lib/docker/containers
91 | readOnly: true
92 | terminationGracePeriodSeconds: 30
93 | volumes:
94 | - name: varlog
95 | hostPath:
96 | path: /var/log
97 | - name: varlibdockercontainers
98 | hostPath:
99 | path: /var/lib/docker/containers
100 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/template/kibana-service.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | common.k8s.elastic.co/type: kibana
6 | kibana.k8s.elastic.co/name: quickstart
7 | name: quickstart-kibana-http
8 | namespace: default
9 | spec:
10 | externalTrafficPolicy: Cluster
11 | ports:
12 | - nodePort: {{ kibana_nodeport }}
13 | port: 5601
14 | protocol: TCP
15 | targetPort: 5601
16 | selector:
17 | common.k8s.elastic.co/type: kibana
18 | kibana.k8s.elastic.co/name: quickstart
19 | sessionAffinity: None
20 | type: NodePort
21 | status:
22 | loadBalancer: {}
23 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/template/kibana.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: kibana.k8s.elastic.co/v1beta1
2 | kind: Kibana
3 | metadata:
4 | name: quickstart
5 | spec:
6 | image: {{ registry_endpoint }}/{{ registry_project }}/kibana:{{ elastic_stack_version }}
7 | version: {{ elastic_stack_version }}
8 | http:
9 | tls:
10 | selfSignedCertificate:
11 | disabled: true
12 | count: 1
13 | elasticsearchRef:
14 | name: quickstart
15 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/yat/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/elasticcloud-playbook/version/yat/README.md
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/yat/all.yml.gotmpl:
--------------------------------------------------------------------------------
1 | registry_endpoint: {{ .harbor.Inherent.endpoint }}
2 | registry_user: {{ .harbor.Inherent.user }}
3 | registry_password: {{ .harbor.Inherent.password }}
4 | registry_project: library
5 | etcd_endpoint: {{ .etcd.Inherent.endpoint }}
6 | prometheus_nodeport: {{ .prometheus.Property.PrometheusNodePort }}
7 | alertmanager_nodeport: {{ .prometheus.Property.AlertManagerNodePort }}
8 | grafana_nodeport: {{ .prometheus.Property.GrafanaNodePort }}
9 | kubernetes_endpoint: {{ .kubernetes.Inherent.endpoint }}
10 | kubernetes_version: {{ .kubernetes.Inherent.version }}
11 | etcd_version: {{ .etcd.Inherent.etcd_version }}
12 | elasticsearch_nodeport: {{ .elasticcloud.Property.ElasticsearchNodePort }}
13 | kibana_nodeport: {{ .elasticcloud.Property.KibanaNodePort }}
14 |
--------------------------------------------------------------------------------
/elasticcloud-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [elasticcloud]
2 | {{ range $v := .elasticcloud.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
5 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/etcd-playbook/version-by-kubeadm/file/README.md
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/backup-etcd-database-folder.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cp -pr /data/etcd /data/etcd-backup-`date +%F-%H-%M-%S`
3 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "438000h"
5 | },
6 | "profiles": {
7 | "etcd": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth",
12 | "client auth"
13 | ],
14 | "expiry": "438000h"
15 | }
16 | }
17 | }
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "ca": {
4 | "expiry": "876000h"
5 | },
6 | "key": {
7 | "algo": "rsa",
8 | "size": 2048
9 | },
10 | "names": [
11 | {
12 | "C": "CN",
13 | "ST": "Guangdong",
14 | "L": "Shenzhen",
15 | "O": "etcd",
16 | "OU": "Wise2C"
17 | }
18 | ]
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/etcd-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "L": "Shenzhen",
11 | "O": "etcd",
12 | "OU": "Wise2C"
13 | }
14 | ]
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/etcd-healthcheck.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | etcdurl=`podman inspect etcd |grep -A1 "\-\-advertise-client-urls" |grep https |head -n 1 |awk -F'"' '{print $2}' |awk -F'=' '{print $2}'`
3 | podman exec etcd etcdctl --cacert=/etcd-cert/ca.pem \
4 | --cert=/etcd-cert/etcd.pem \
5 | --key=/etcd-cert/etcd-key.pem \
6 | --endpoints ${etcdurl} endpoint health |grep 'is healthy'
7 | podman exec etcd etcdctl --cacert=/etcd-cert/ca.pem \
8 | --cert=/etcd-cert/etcd.pem \
9 | --key=/etcd-cert/etcd-key.pem \
10 | --endpoints ${etcdurl} endpoint status --cluster -w table
11 |
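12 | # Expected output sketch (endpoint URL illustrative): the health check prints a
13 | # line such as
14 | #   https://192.168.0.20:2379 is healthy: successfully committed proposal: ...
15 | # followed by the cluster-wide table from 'endpoint status --cluster -w table'.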
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/generate-etcd-certificates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # Check if there are no cert files under /etc/etcd/pki
5 | certificate_path='/etc/etcd/pki'
6 | if [ -f ${certificate_path}/ca.pem ] || [ -f ${certificate_path}/ca-key.pem ] || [ -f ${certificate_path}/etcd.pem ] || [ -f ${certificate_path}/etcd-key.pem ]; then
7 | echo 'Existing etcd certificate files will not be replaced.'
8 | else
9 | # ETCD CA
10 | cd /var/lib/wise2c/tmp/etcd/
11 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca
12 | # ETCD certificate
13 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=127.0.0.1,{% for host in play_hosts %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %} -profile=etcd etcd-csr.json | cfssljson -bare etcd
14 | cd /var/lib/wise2c/tmp/etcd/
15 | cp *.pem /etc/etcd/pki/
16 | fi
17 |
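18 | # Inspection sketch: the SAN list baked into the generated server certificate
19 | # can be reviewed with cfssl's certinfo subcommand (downloaded by init.sh):
20 | #   cfssl certinfo -cert /etc/etcd/pki/etcd.pem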
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/file/make-etcd-snapshot.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | etcdurl=`podman inspect etcd |grep -A1 "\-\-advertise-client-urls" |grep https |head -n 1 |awk -F'"' '{print $2}' |awk -F'=' '{print $2}'`
5 | # Run the health check inside the if-condition: with 'set -e', a bare failing command would abort the script before the else branch could report "cluster is not healthy".
6 | if podman exec etcd etcdctl --cacert=/etcd-cert/ca.pem --cert=/etcd-cert/etcd.pem --key=/etcd-cert/etcd-key.pem --endpoints ${etcdurl} endpoint health; then
7 | echo cluster is healthy
8 | backuptime=`date +%F-%H-%M-%S`
9 | etcdurl=`podman inspect etcd |grep -A1 "\-\-advertise-client-urls" |grep https |head -n 1 |awk -F'"' '{print $2}' |awk -F'=' '{print $2}'`
10 | etcdbackup=`podman exec -e ETCDCTL_API=3 etcd etcdctl --cacert=/etcd-cert/ca.pem --cert=/etcd-cert/etcd.pem --key=/etcd-cert/etcd-key.pem --endpoints ${etcdurl} snapshot save snapshotdb-${backuptime} |awk '{print $4}'`
11 | podman cp etcd:/${etcdbackup} ./
12 | else
13 | echo cluster is not healthy
14 | exit 1
15 | fi
16 |
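17 | # Restore sketch (illustrative; run on a stopped member with an empty data
18 | # directory): a snapshot taken above can be restored with
19 | #   etcdctl snapshot restore ./snapshotdb-<timestamp> --data-dir /data/etcd
20 | # before the member is started again.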
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/group_vars/etcd.yml:
--------------------------------------------------------------------------------
1 | command:
2 | - etcd
3 | - --name={% for host in ansible_play_batch -%}{% if host == inventory_hostname -%}etcd{{ loop.index0 }}{% endif -%}{% endfor -%}
4 | - --data-dir=/var/lib/etcd
5 | - --advertise-client-urls=https://{{ inventory_hostname }}:2379
6 | - --listen-client-urls=https://{{ inventory_hostname }}:2379
7 | - --listen-peer-urls=https://{{ inventory_hostname }}:2380
8 | - --initial-cluster-token=etcd-cluster
9 | - --initial-advertise-peer-urls=https://{{ inventory_hostname }}:2380
10 | - --initial-cluster={% for host in play_hosts %}etcd{{ loop.index0 }}=https://{{ host }}:2380{% if not loop.last %},{% endif %}{% endfor %}
11 | - --initial-cluster-state=new
12 | - --client-cert-auth
13 | - --trusted-ca-file=/etcd-cert/ca.pem
14 | - --cert-file=/etcd-cert/etcd.pem
15 | - --key-file=/etcd-cert/etcd-key.pem
16 | - --peer-client-cert-auth
17 | - --peer-trusted-ca-file=/etcd-cert/ca.pem
18 | - --peer-cert-file=/etcd-cert/etcd.pem
19 | - --peer-key-file=/etcd-cert/etcd-key.pem
20 |
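21 | # Rendered example (illustrative, three hosts 10.0.0.1-10.0.0.3): the Jinja
22 | # loops above expand per host to flags such as
23 | #   --name=etcd0
24 | #   --initial-cluster=etcd0=https://10.0.0.1:2380,etcd1=https://10.0.0.2:2380,etcd2=https://10.0.0.3:2380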
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/inherent.yaml:
--------------------------------------------------------------------------------
1 | endpoint: {{ range $i, $v := .Hosts.self -}}
2 | https://{{ $v.IP }}:2379{{if notLast $i $.Hosts.self}},{{end}}
3 | {{- end }}
4 |
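5 | # Rendered example (illustrative, two hosts): the notLast helper suppresses the
6 | # trailing comma, so the line above renders as
7 | #   endpoint: https://10.0.0.1:2379,https://10.0.0.2:2379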
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | path=`dirname $0`
6 |
7 | image=registry.k8s.io/etcd:${1}
8 | echo "" >> ${path}/group_vars/etcd.yml
9 | echo "version: ${1}" >> ${path}/group_vars/etcd.yml
10 |
11 | echo "etcd_version: ${1}" >> ${path}/inherent.yaml
12 |
13 | docker pull ${image}
14 | docker save ${image} > ${path}/file/etcd.tar
15 | bzip2 -z --best ${path}/file/etcd.tar
16 |
17 | export CPUArch=$(uname -m | awk '{ if ($1 == "x86_64") print "amd64"; else if ($1 == "aarch64") print "arm64"; else print $1 }')
18 |
19 | echo "=== download cfssl tools ==="
20 | export CFSSL_VERSION=1.6.4
21 | export CFSSL_URL=https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}
22 | curl -L -o cfssl ${CFSSL_URL}/cfssl_${CFSSL_VERSION}_linux_${CPUArch}
23 | curl -L -o cfssljson ${CFSSL_URL}/cfssljson_${CFSSL_VERSION}_linux_${CPUArch}
24 | curl -L -o cfssl-certinfo ${CFSSL_URL}/cfssl-certinfo_${CFSSL_VERSION}_linux_${CPUArch}
25 | chmod +x cfssl cfssljson cfssl-certinfo
26 | tar zcvf ${path}/file/cfssl-tools.tar.gz cfssl cfssl-certinfo cfssljson
27 | echo "=== cfssl tools downloaded successfully ==="
28 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "self",
4 | "label": "etcd hosts",
5 | "description": "hosts to be set up as etcd servers",
6 | "type": "host",
7 | "default": "",
8 | "required": true
9 | },
10 | {
11 | "variable": "BackupEtcdFolder",
12 | "label": "Back up etcd database folder and upgrade etcd cluster",
13 | "description": "Back up etcd database folder before upgrading. It will stop etcd cluster for a while.",
14 | "type": "bool",
15 | "default": "false",
16 | "required": true
17 | },
18 | {
19 | "variable": "SnapshotEtcd",
20 | "label": "Make a snapshot backup for etcd and upgrade etcd cluster",
21 | "description": "Make a snapshot backup for etcd before upgrading.",
22 | "type": "bool",
23 | "default": "false",
24 | "required": true
25 | },
26 | {
27 | "variable": "EtcdDataPath",
28 | "label": "Etcd database path on the host",
29 | "description": "Etcd database path on the host. Etcd database will be stored in this foler.",
30 | "type": "string",
31 | "default": "/data/etcd",
32 | "required": true
33 | }
34 | ]
35 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean etcd
2 | hosts: etcd
3 | user: root
4 | tasks:
5 | - name: stop & rm old etcd container
6 | containers.podman.podman_container:
7 | name: etcd
8 | state: absent
9 |
10 | - name: clean etcd directory
11 | file:
12 | path: '{{ item }}'
13 | state: absent
14 | with_items:
15 | - "{{ etcd_data_path }}"
16 |
17 | - name: clean etcd cert directory
18 | file:
19 | path: '/etc/etcd/pki'
20 | state: absent
21 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/yat/all.yml.gotmpl:
--------------------------------------------------------------------------------
1 | backup_etcd_folder: {{ .etcd.Property.BackupEtcdFolder }}
2 | snapshot_etcd: {{ .etcd.Property.SnapshotEtcd }}
3 | etcd_data_path: {{ .etcd.Property.EtcdDataPath }}
4 | registry_endpoint: {{ .harbor.Inherent.endpoint }}
5 | registry_https: {{ .harbor.Inherent.https }}
6 | registry_user: {{ .harbor.Inherent.user }}
7 | registry_password: {{ .harbor.Inherent.password }}
8 | registry_project: library
9 |
--------------------------------------------------------------------------------
/etcd-playbook/version-by-kubeadm/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [etcd]
2 | {{ range $v := .etcd.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
--------------------------------------------------------------------------------
/harbor-playbook/external/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/harbor-playbook/external/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/harbor-playbook/external/file/README.md
--------------------------------------------------------------------------------
/harbor-playbook/external/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/harbor-playbook/external/group_vars/README.md
--------------------------------------------------------------------------------
/harbor-playbook/external/inherent.yaml:
--------------------------------------------------------------------------------
1 | endpoint: {{ .Property.endpoint }}
2 | https: {{ .Property.https }}
3 | user: {{ .Property.username }}
4 | password: {{ .Property.password }}
5 |
--------------------------------------------------------------------------------
/harbor-playbook/external/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # echo no action needed for external harbor
3 |
--------------------------------------------------------------------------------
/harbor-playbook/external/install.ansible:
--------------------------------------------------------------------------------
1 | - name: setup external harbor
2 | hosts: harbor
3 | user: root
4 | vars:
5 | path: /var/lib/wise2c/tmp/harbor
6 | tasks:
7 | - name: setup external harbor
8 | shell: |
9 | echo 'setup external harbor'
10 |
--------------------------------------------------------------------------------
/harbor-playbook/external/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "endpoint",
4 | "label": "harbor entry point",
5 | "description": "ip/FQDN of harbor cluster entrypoint",
6 | "type": "string",
7 | "default": "",
8 | "required": true
9 | },
10 | {
11 | "variable": "https",
12 | "label": "Https is enabled on external Harbar server",
13 | "description": "Https is enabled on external Harbar server",
14 | "type": "bool",
15 | "default": false,
16 | "required": true
17 | },
18 | {
19 | "variable": "username",
20 | "label": "Username on the external Harbor server",
21 | "description": "Username on the external Harbor server",
22 | "type": "string",
23 | "default": "admin",
24 | "required": true
25 | },
26 | {
27 | "variable": "password",
28 | "label": "Password of the account on the external Harbor server",
29 | "description": "Password of the account on the external Harbor server",
30 | "type": "password",
31 | "default": "Harbor12345",
32 | "required": true
33 | }
34 | ]
35 |
--------------------------------------------------------------------------------
/harbor-playbook/external/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: setup external harbor
2 | hosts: harbor
3 | user: root
4 | tasks:
5 | - name: reset external harbor
6 | shell: |
7 | echo 'no need to reset external harbor'
8 |
--------------------------------------------------------------------------------
/harbor-playbook/external/template/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/harbor-playbook/external/template/README.md
--------------------------------------------------------------------------------
/harbor-playbook/external/yat/harbor.yml.gotmpl:
--------------------------------------------------------------------------------
1 | cpath: /var/lib/wise2c/harbor
2 | endpoint: {{ .harbor.Inherent.endpoint }}
3 | https: {{ .harbor.Inherent.https }}
4 | user: {{ .harbor.Inherent.user }}
5 | password: {{ .harbor.Inherent.password }}
6 |
--------------------------------------------------------------------------------
/harbor-playbook/external/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [harbor]
2 | {{ range $v := .harbor.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
5 |
--------------------------------------------------------------------------------
/harbor-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/harbor-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/harbor-playbook/version/file/README.md
--------------------------------------------------------------------------------
/harbor-playbook/version/file/install-harbor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sed -i '0,/https:/s//# https:/' harbor.yml
3 | sed -i 's,port: 443,# port: 443,g' harbor.yml
4 | sed -i 's,certificate:,# certificate:,g' harbor.yml
5 | sed -i 's,private_key:,# private_key:,g' harbor.yml
6 |
7 | ./install.sh
8 |
9 | #fix the bug for ARM64 packages
10 | export CPUArch=$(uname -m | awk '{ if ($1 == "x86_64") print "amd64"; else if ($1 == "aarch64") print "arm64"; else print $1 }')
11 |
12 | if [ "$CPUArch" == 'arm64' ]
13 | then
14 | chmod -R 777 /data/redis
15 | fi
16 |
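17 | # Note on the first sed above: the '0,/https:/' address (a GNU sed extension)
18 | # limits the substitution to the first line matching 'https:', so only the
19 | # https block header in harbor.yml is commented out while later occurrences are
20 | # untouched; the following seds disable the related port/certificate settings,
21 | # leaving Harbor on plain HTTP.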
--------------------------------------------------------------------------------
/harbor-playbook/version/file/start-harbor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /var/lib/wise2c/harbor/harbor
3 | docker-compose start
3 |
--------------------------------------------------------------------------------
/harbor-playbook/version/file/stop-harbor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /var/lib/wise2c/harbor/harbor
3 | docker-compose stop
3 |
--------------------------------------------------------------------------------
/harbor-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/harbor-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/harbor-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 | endpoint: {{ if eq (print .Property.endpoint) "" -}}
2 | {{ (index .Hosts.self 0).IP }}
3 | {{- else -}}
4 | {{ .Property.endpoint }}
5 | {{- end }}
6 | https: false
7 | user: admin
8 | password: {{ if eq (print .Property.password) "" -}}
9 | Harbor12345
10 | {{- else -}}
11 | {{ .Property.password }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/harbor-playbook/version/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | path=`dirname $0`
6 |
7 | version=`cat ${path}/components-version.txt |grep "Harbor" |awk '{print $3}'`
8 |
9 | echo "" >> ${path}/yat/harbor.yml.gotmpl
10 | echo "version: v${version}" >> ${path}/yat/harbor.yml.gotmpl
11 |
12 | curl -L https://github.com/docker/compose/releases/download/v2.26.1/docker-compose-$(uname -s | tr '[A-Z]' '[a-z]')-$(uname -m) -o ${path}/file/docker-compose
13 |
14 | export CPUArch=$(uname -m | awk '{ if ($1 == "x86_64") print "amd64"; else if ($1 == "aarch64") print "arm64"; else print $1 }')
15 |
16 | if [ "$CPUArch" == 'amd64' ]
17 | then
18 | curl -L https://storage.googleapis.com/harbor-releases/release-${version%.*}.0/harbor-offline-installer-v${version}.tgz \
19 | -o ${path}/file/harbor-offline-installer-v${version}.tgz
20 | else
21 | curl -L https://github.com/wise2c-devops/build-harbor-aarch64/releases/download/v${version}/harbor-offline-installer-aarch64-v${version}.tgz \
22 | -o ${path}/file/harbor-offline-installer-v${version}.tgz
23 | fi
24 |
25 | curl -sSL https://raw.githubusercontent.com/vmware/harbor/v${version}/make/harbor.yml.tmpl \
26 | | sed \
27 | -e "s,hostname: reg\.mydomain\.com,hostname: {{ inventory_hostname }},g" \
28 | -e "s,harbor_admin_password: Harbor12345,harbor_admin_password: {{ password }},g" \
29 | > ${path}/template/harbor.yml.j2
30 |
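31 | # Note: '${version%.*}.0' strips the patch level and re-appends '.0', so e.g.
32 | # version=2.13.1 would download from release-2.13.0 on the amd64 branch above
33 | # (illustrative; the actual version comes from components-version.txt).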
--------------------------------------------------------------------------------
/harbor-playbook/version/install.ansible:
--------------------------------------------------------------------------------
1 | - name: setup harbor
2 | hosts: harbor
3 | user: root
4 | vars:
5 | path: /var/lib/wise2c/tmp/harbor
6 | tasks:
7 | - name: yum remove crio and podman on Redhat/CentOS/Rocky/Alma/Oracle 8.x
8 | yum:
9 | disablerepo: '*'
10 | enablerepo:
11 | - wise2c-k8s
12 | state: absent
13 | name: '{{ item }}'
14 | with_items:
15 | - cri-o
16 | - podman
17 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS") or (ansible_distribution == "Rocky") or (ansible_distribution == "AlmaLinux") or (ansible_distribution == "OracleLinux") or (ansible_distribution == "Anolis")
18 |
19 | - name: yum install docker on Redhat/CentOS/Rocky/Alma/Oracle
20 | yum:
21 | disablerepo: '*'
22 | enablerepo: wise2c-docker
23 | allowerasing: true
24 | update_cache: true
25 | state: present
26 | name: '{{ item }}'
27 | with_items:
28 | - docker-ce
29 | - docker-ce-cli
30 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS") or (ansible_distribution == "Rocky") or (ansible_distribution == "AlmaLinux") or (ansible_distribution == "OracleLinux") or (ansible_distribution == "Anolis")
31 |
32 | - name: apt remove crio and podman
33 | apt:
34 | state: absent
35 | name: '{{ item }}'
36 | with_items:
37 | - cri-o
38 | - podman
39 |     when: ansible_distribution == "Ubuntu"
40 |
41 | - name: apt install docker
42 | apt:
43 | update_cache: true
44 | state: present
45 | name: '{{ item }}'
46 | with_items:
47 | - docker-ce
48 | - docker-ce-cli
49 |     when: ansible_distribution == "Ubuntu"
50 |
51 | - name: start docker
52 | systemd:
53 | name: docker
54 | enabled: true
55 | state: started
56 |
57 | - name: make harbor dir
58 | file:
59 | path: '{{ item }}'
60 | state: directory
61 | mode: 0755
62 | with_items:
63 | - '{{ cpath }}'
64 | - '{{ path }}'
65 |
66 | - name: unarchive harbor
67 | unarchive:
68 | src: file/harbor-offline-installer-{{ version }}.tgz
69 | dest: '{{ cpath }}'
70 |
71 | - name: generate harbor config
72 | template:
73 | src: '{{ item.src }}'
74 | dest: '{{ item.dest }}'
75 | with_items:
76 | - { src: 'template/harbor.yml.j2', dest: '{{ cpath }}/harbor/harbor.yml' }
77 |
78 | - name: copy harbor start and stop scripts
79 | copy:
80 | src: '{{ item.src }}'
81 | dest: '{{ item.dest }}'
82 | mode: 0755
83 | with_items:
84 | - { src: 'file/install-harbor.sh', dest: '{{ cpath }}/harbor/install-harbor.sh' }
85 | - { src: 'file/start-harbor.sh', dest: '{{ cpath }}/harbor/start-harbor.sh' }
86 | - { src: 'file/stop-harbor.sh', dest: '{{ cpath }}/harbor/stop-harbor.sh' }
87 | - { src: 'file/docker-compose', dest: '/usr/bin/docker-compose' }
88 |
89 | - name: launch harbor
90 | shell: ./install-harbor.sh
91 | args:
92 | chdir: '{{ cpath }}/harbor'
93 |
--------------------------------------------------------------------------------
/harbor-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "self",
4 | "label": "harbor hosts",
5 | "description": "hosts to be set up as harbor server",
6 | "type": "host",
7 | "default": "",
8 | "required": true
9 | },
10 | {
11 | "variable": "endpoint",
12 | "label": "harbor entry point",
13 | "description": "ip/FQDN of harbor cluster entrypoint",
14 | "type": "string",
15 | "default": "",
16 | "required": false
17 | },
18 | {
19 | "variable": "password",
20 | "label": "harbor admin password",
21 | "description": "Password for the Harbor admin account",
22 | "type": "password",
23 | "default": "Harbor12345",
24 | "required": true
25 | }
26 | ]
27 |
--------------------------------------------------------------------------------
/harbor-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean harbor
2 | hosts: harbor
3 | user: root
4 | tasks:
5 | # - name: stop & rm harbor
6 | # docker_service:
7 | # project_src: '{{ cpath }}/harbor'
8 | # state: absent
9 | # remove_volumes: true
10 |
11 | - name: stop & rm harbor
12 | shell: docker-compose stop && docker-compose rm -f
13 | args:
14 | chdir: '{{ cpath }}/harbor'
15 |
16 | - name: clean harbor directory
17 | file:
18 | path: '{{ item }}'
19 | state: absent
20 | with_items:
21 | - /data/registry
22 | - /data/database
23 | - /data/ca_download
24 | - /data/config
25 | - /data/job_logs
26 | - /data/psc
27 | - /data/secretkey
28 | - /data/redis
29 | - '{{ cpath }}'
30 |
--------------------------------------------------------------------------------
/harbor-playbook/version/template/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/harbor-playbook/version/template/README.md
--------------------------------------------------------------------------------
/harbor-playbook/version/yat/harbor.yml.gotmpl:
--------------------------------------------------------------------------------
1 | cpath: /var/lib/wise2c/harbor
2 | password: {{ .harbor.Inherent.password }}
3 |
--------------------------------------------------------------------------------
/harbor-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [harbor]
2 | {{ range $v := .harbor.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
5 |
--------------------------------------------------------------------------------
/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | path=`dirname $0`
6 |
7 | kubernetes_version=1.33.0
8 | harbor_version=2.13.0
9 | docker_version=26.1.3
10 | crio_version=1.33.0
11 | haproxy_version=2.0.0
12 | keepalived_version=1.3.5
13 | loadbalancer_version=HAProxy-${haproxy_version}_Keepalived-${keepalived_version}
14 | prometheus_version=2.54.1
15 | prometheus_operator_version=0.76.2
16 | kube_prometheus_version=0.14.0
17 | metrics_server_version=0.7.2
18 | dashboard_version=2.7.0
19 | metrics_scraper_version=1.0.8
20 | flannel_version=0.26.7
21 | flannel_cni_plugin_version=1.6.2-flannel1
22 | calico_version=3.29.3
23 | helm_version=3.17.3
24 | istio_version=1.23.6
25 | contour_version=1.30.3
26 | contour_envoyproxy_version=1.31.6
27 | elastic_cloud_version=3.0.0
28 | elastic_stack_version=9.0.0
29 |
30 | mv ${path}/kubernetes-playbook/version ${path}/kubernetes-playbook/v${kubernetes_version}
31 | mv ${path}/harbor-playbook/version ${path}/harbor-playbook/v${harbor_version}
32 | mv ${path}/docker-playbook/version ${path}/docker-playbook/${docker_version}-CE
33 | mv ${path}/crio-playbook/version ${path}/crio-playbook/${crio_version}
34 | mv ${path}/loadbalancer-playbook/version ${path}/loadbalancer-playbook/${loadbalancer_version}
35 | mv ${path}/prometheus-playbook/version ${path}/prometheus-playbook/Kube-Prometheus-v${kube_prometheus_version}
36 | mv ${path}/istio-playbook/version ${path}/istio-playbook/v${istio_version}
37 | mv ${path}/elasticcloud-playbook/version ${path}/elasticcloud-playbook/v${elastic_cloud_version}
38 |
39 | docker run --rm --name=kubeadm-version wise2c/kubeadm-version:v${kubernetes_version} kubeadm config images list --kubernetes-version ${kubernetes_version} > ${path}/k8s-images-list.txt
40 |
41 | etcd_version=`cat ${path}/k8s-images-list.txt |grep etcd |awk -F ':' '{print $2}'`
42 | mv ${path}/etcd-playbook/version-by-kubeadm ${path}/etcd-playbook/${etcd_version}
43 |
44 | echo "ETCD Version: ${etcd_version}" > ${path}/components-version.txt
45 | echo "Kubernetes Version: ${kubernetes_version}" >> ${path}/components-version.txt
46 | echo "Harbor Version: ${harbor_version}" >> ${path}/components-version.txt
47 | echo "Docker Version: ${docker_version}" >> ${path}/components-version.txt
48 | echo "CRIO Version: ${crio_version}" >> ${path}/components-version.txt
49 | echo "HAProxy Version: ${haproxy_version}" >> ${path}/components-version.txt
50 | echo "Keepalived Version: ${keepalived_version}" >> ${path}/components-version.txt
51 | echo "Prometheus Version: ${prometheus_version}" >> ${path}/components-version.txt
52 | echo "PrometheusOperator Version: ${prometheus_operator_version}" >> ${path}/components-version.txt
53 | echo "KubePrometheus Version: ${kube_prometheus_version}" >> ${path}/components-version.txt
54 | echo "MetricsServer Version: ${metrics_server_version}" >> ${path}/components-version.txt
55 | echo "Dashboard Version: ${dashboard_version}" >> ${path}/components-version.txt
56 | echo "MetricsScraper Version: ${metrics_scraper_version}" >> ${path}/components-version.txt
57 | echo "Flannel Version: ${flannel_version}" >> ${path}/components-version.txt
58 | echo "flannel-cni-plugin Version: ${flannel_cni_plugin_version}" >> ${path}/components-version.txt
59 | echo "Calico Version: ${calico_version}" >> ${path}/components-version.txt
60 | echo "Helm Version: ${helm_version}" >> ${path}/components-version.txt
61 | echo "Istio Version: ${istio_version}" >> ${path}/components-version.txt
62 | echo "Contour Version: ${contour_version}" >> ${path}/components-version.txt
63 | echo "ContourEnvoyProxy Version: ${contour_envoyproxy_version}" >> ${path}/components-version.txt
64 | echo "ElasticCloud Version: ${elastic_cloud_version}" >> ${path}/components-version.txt
65 | echo "ElasticStack Version: ${elastic_stack_version}" >> ${path}/components-version.txt
66 |
67 | for dir in `ls ${path}`
68 | do
69 | if [[ ${dir} =~ -playbook$ ]]; then
70 | chmod -R 775 ${path}/${dir}
71 | for version in `ls ${path}/${dir}`
72 | do
73 | echo ${version}
74 | if [ -f ${path}/${dir}/${version}/init.sh ]; then
75 | cp ${path}/components-version.txt ${path}/${dir}/${version}/
76 | bash ${path}/${dir}/${version}/init.sh ${version}
77 | fi
78 | done
79 | fi
80 | done
81 |
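Note: `kubeadm config images list` prints one fully qualified image reference per line, so the etcd tag extraction above is a plain field split on ':'. A hypothetical excerpt (the tag is illustrative):

    $ grep etcd k8s-images-list.txt
    registry.k8s.io/etcd:3.5.21-0
    $ grep etcd k8s-images-list.txt | awk -F ':' '{print $2}'
    3.5.21-0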
--------------------------------------------------------------------------------
/istio-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/istio-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/istio-playbook/version/file/README.md
--------------------------------------------------------------------------------
/istio-playbook/version/file/clean-images-tags.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /var/lib/wise2c/tmp/istio
3 | for file in $(cat images-list.txt); do podman rmi $file ; done
4 |
--------------------------------------------------------------------------------
/istio-playbook/version/file/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # There seems to be a bug loading images on Ubuntu hosts: without a wait, it returns "Error response from daemon: No such image".
5 | #if [ ! -f /etc/redhat-release ]; then
6 | # sleep 60
7 | #fi
8 |
9 | MyImageRepositoryIP=`cat harbor-address.txt`
10 | MyImageRepositoryProject=library
11 | IstioVersion=`cat components-version.txt |grep "Istio Version" |awk '{print $3}'`
12 |
13 | ######### Push images #########
14 | cat images-list.txt |grep -v quay.io/ > images-list-crio.txt
15 | sed -i 's#docker.io/##g' images-list-crio.txt
16 | cat images-list.txt |grep "quay.io\/" > images-list-quay.txt
17 |
18 | for file in $(cat images-list-crio.txt); do podman tag $file $MyImageRepositoryIP/$MyImageRepositoryProject/${file##*/}; done
19 | for file in $(cat images-list-quay.txt); do podman tag $file $MyImageRepositoryIP/$MyImageRepositoryProject/${file##*/}; done
20 |
21 | echo 'Images tagged.'
22 |
23 | for file in $(cat images-list-crio.txt); do podman push $MyImageRepositoryIP/$MyImageRepositoryProject/${file##*/}; done
24 | for file in $(cat images-list-quay.txt); do podman push $MyImageRepositoryIP/$MyImageRepositoryProject/${file##*/}; done
25 |
26 | echo 'Images pushed.'
27 |
28 | # Istio deploy
29 | rm -rf istio-$IstioVersion
30 | tar zxvf istio-$IstioVersion-origin.tar.gz
31 | cd istio-$IstioVersion/
32 | rm -f /usr/bin/istioctl
33 | cp bin/istioctl /usr/bin/
34 |
35 | istioctl install -y --set profile=demo --set hub=$MyImageRepositoryIP\/$MyImageRepositoryProject
36 |
37 | sed -i "s,image: \"docker.io/grafana/,image: \"$MyImageRepositoryIP/$MyImageRepositoryProject/,g" samples/addons/grafana.yaml
38 | sed -i "s,image: \"docker.io/jaegertracing/,image: \"$MyImageRepositoryIP/$MyImageRepositoryProject/,g" samples/addons/jaeger.yaml
39 | sed -i "s,image: \"ghcr.io/prometheus-operator/,image: \"$MyImageRepositoryIP/$MyImageRepositoryProject/,g" samples/addons/prometheus.yaml
40 | sed -i "s,image: \"jimmidyson/,image: \"$MyImageRepositoryIP/$MyImageRepositoryProject/,g" samples/addons/prometheus.yaml
41 | sed -i "s,image: \"prom/,image: \"$MyImageRepositoryIP/$MyImageRepositoryProject/,g" samples/addons/prometheus.yaml
42 | sed -i "s,- image: \"quay.io/kiali/,- image: \"$MyImageRepositoryIP/$MyImageRepositoryProject/,g" samples/addons/kiali.yaml
43 | sed -i "s,strategy: anonymous,strategy: token,g" samples/addons/kiali.yaml
44 |
45 | set +e
46 | # We need to verify that all 14 Istio CRDs were committed to the Kubernetes api-server
47 | printf "Waiting for Istio to commit custom resource definitions..."
48 |
49 | until [ `kubectl get crds |grep 'istio.io\|certmanager.k8s.io' |wc -l` -eq 14 ]; do sleep 1; printf "."; done
50 |
51 | crdresult=""
52 | for ((i=1; i<=14; i++)); do crdresult=${crdresult}"True"; done
53 |
54 | until [ `for istiocrds in $(kubectl get crds |grep 'istio.io\|certmanager.k8s.io' |awk '{print $1}'); do kubectl get crd ${istiocrds} -o jsonpath='{.status.conditions[1].status}'; done` = $crdresult ]; do sleep 1; printf "."; done
55 |
56 | echo 'Istio CRD is ready!'
57 |
58 | kubectl apply -f samples/addons/kiali.yaml
59 | kubectl apply -f samples/addons/prometheus.yaml
60 | kubectl apply -f samples/addons/grafana.yaml
61 | kubectl apply -f samples/addons/jaeger.yaml
62 | #kubectl apply -f samples/addons/prometheus_vm.yaml
63 | #kubectl apply -f samples/addons/prometheus_vm_tls.yaml
64 |
65 | set -e
66 |
67 | kubectl apply -f /var/lib/wise2c/tmp/istio/kiali-service.yaml
68 | kubectl apply -f /var/lib/wise2c/tmp/istio/jaeger-service.yaml
69 | kubectl apply -f /var/lib/wise2c/tmp/istio/prometheus-service.yaml
70 | kubectl apply -f /var/lib/wise2c/tmp/istio/grafana-service.yaml
71 |
72 | echo 'NodePorts have been set for services.'
73 |
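Note on the retag loops: `${file##*/}` strips everything up to and including the last '/', so every image lands directly under the Harbor project regardless of its source registry path. A quick sketch with a hypothetical list entry:

    file=quay.io/kiali/kiali:v2.5
    echo "${file##*/}"    # -> kiali:v2.5, pushed as $MyImageRepositoryIP/library/kiali:v2.5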
--------------------------------------------------------------------------------
/istio-playbook/version/file/harbor-address.txt:
--------------------------------------------------------------------------------
1 | harbor-address
2 |
--------------------------------------------------------------------------------
/istio-playbook/version/file/remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /var/lib/wise2c/tmp/istio/
3 | IstioVersion=`cat components-version.txt |grep "Istio Version" |awk '{print $3}'`
4 | cd istio-$IstioVersion/
5 | istioctl manifest generate | kubectl delete -f -
6 | kubectl delete ns istio-system
7 |
--------------------------------------------------------------------------------
/istio-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/istio-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/istio-playbook/version/group_vars/istio.yml:
--------------------------------------------------------------------------------
1 | cpath: /var/lib/wise2c/tmp/istio
2 |
--------------------------------------------------------------------------------
/istio-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/istio-playbook/version/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | path=`dirname $0`
6 |
7 | IstioVersion=`cat ${path}/components-version.txt |grep "Istio" |awk '{print $3}'`
8 |
9 | echo "" >> ${path}/group_vars/istio.yml
10 | echo "istio_version: ${IstioVersion}" >> ${path}/group_vars/istio.yml
11 |
12 | export CPUArch=$(uname -m | awk '{ if ($1 == "x86_64") print "amd64"; else if ($1 == "aarch64") print "arm64"; else print $1 }')
13 | curl -L -o ${path}/file/istio-$IstioVersion-origin.tar.gz https://github.com/istio/istio/releases/download/$IstioVersion/istio-$IstioVersion-linux-${CPUArch}.tar.gz
14 |
15 | cd ${path}/file/
16 | tar zxf istio-$IstioVersion-origin.tar.gz
17 | echo "istio/proxyv2:$IstioVersion" > images-list.txt
18 | echo "istio/pilot:$IstioVersion" >> images-list.txt
19 | echo "istio/operator:$IstioVersion" >> images-list.txt
20 | cat istio-$IstioVersion/samples/addons/prometheus.yaml |grep image: |grep prom |awk '{print $2}' |awk -F "[\"]" '{print $2}' >> images-list.txt
21 | cat istio-$IstioVersion/samples/addons/prometheus.yaml |grep image: |grep configmap |awk '{print $2}' |awk -F "[\"]" '{print $2}' >> images-list.txt
22 | cat istio-$IstioVersion/samples/addons/grafana.yaml |grep image: |grep grafana |awk '{print $2}' |awk -F "[\"]" '{print $2}' >> images-list.txt
23 | cat istio-$IstioVersion/samples/addons/jaeger.yaml |grep image: |grep jaegertracing |awk '{print $2}' |awk -F "[\"]" '{print $2}' >> images-list.txt
24 | cat istio-$IstioVersion/samples/addons/extras/zipkin.yaml |grep image: |grep zipkin |awk '{print $2}' >> images-list.txt
25 | cat istio-$IstioVersion/samples/addons/kiali.yaml |grep image: |awk '{print $3}' |awk -F "[\"]" '{print $2}' >> images-list.txt
26 | cat istio-$IstioVersion/manifests/profiles/default.yaml |grep coredns-plugin |awk '{print $2}' >> images-list.txt
27 |
28 | echo 'Images list for Istio:'
29 | cat images-list.txt
30 |
31 | for file in $(cat images-list.txt); do docker pull $file; done
32 | echo 'Images pulled.'
33 |
34 | docker save $(cat images-list.txt) -o istio-images-$IstioVersion.tar
35 | echo 'Images saved.'
36 | bzip2 -z --best istio-images-$IstioVersion.tar
37 | echo 'Images are compressed in bzip2 format.'
38 |
39 | rm -rf istio-$IstioVersion
40 |
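The awk expression on line 12 maps kernel architecture names to the Go-style names used in the istioctl release assets. For example, on a typical x86 build host:

    $ uname -m
    x86_64
    $ uname -m | awk '{ if ($1 == "x86_64") print "amd64"; else if ($1 == "aarch64") print "arm64"; else print $1 }'
    amd64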
--------------------------------------------------------------------------------
/istio-playbook/version/install.ansible:
--------------------------------------------------------------------------------
1 | - name: setup istio
2 | hosts: istio
3 | user: root
4 | vars:
5 | path: /var/lib/wise2c/tmp/istio
6 | tasks:
7 | - name: make istio dir
8 | file:
9 | path: '{{ item }}'
10 | state: directory
11 | mode: 0755
12 | with_items:
13 | - '{{ cpath }}'
14 | - '{{ path }}'
15 | run_once: true
16 |
17 | - name: copy istio tarball and images
18 | copy:
19 | src: '{{ item.src }}'
20 | dest: '{{ item.dest }}'
21 | with_items:
22 | - { src: 'file/istio-{{ istio_version }}-origin.tar.gz', dest: '{{ path }}' }
23 | - { src: 'file/istio-images-{{ istio_version }}.tar.bz2', dest: '{{ path }}' }
24 | run_once: true
25 |
26 | - name: copy istio deploy and reset script
27 | copy:
28 | src: '{{ item.src }}'
29 | dest: '{{ item.dest }}'
30 | mode: 0755
31 | with_items:
32 | - { src: 'file/deploy.sh', dest: '{{ path }}' }
33 | - { src: 'file/remove.sh', dest: '{{ path }}' }
34 | run_once: true
35 |
36 | - name: copy istio deploy script dependency files
37 | copy:
38 | src: '{{ item.src }}'
39 | dest: '{{ item.dest }}'
40 | with_items:
41 | - { src: './components-version.txt', dest: '{{ path }}' }
42 | - { src: 'file/images-list.txt', dest: '{{ path }}' }
43 | - { src: 'file/harbor-address.txt', dest: '{{ path }}' }
44 | run_once: true
45 |
46 | - name: load istio images
47 | shell: |
48 | podman load -i {{ path }}/istio-images-{{ istio_version }}.tar.bz2
49 | async: 300
50 | poll: 5
51 | run_once: true
52 |
53 | - name: podman login
54 | containers.podman.podman_login:
55 | registry: '{{ registry_endpoint }}'
56 | username: '{{ registry_user }}'
57 | password: '{{ registry_password }}'
58 | run_once: true
59 |
60 | - name: set harbor address for deploy script
61 | replace:
62 | path: "/var/lib/wise2c/tmp/istio/{{ item }}"
63 | regexp: "harbor-address"
64 | replace: "{{ registry_endpoint }}"
65 | with_items:
66 | - "harbor-address.txt"
67 | run_once: true
68 |
69 | - name: set nodeport for kiali jaeger prometheus and grafana service
70 | template:
71 | src: '{{ item.src }}'
72 | dest: '{{ item.dest }}'
73 | with_items:
74 | - { src: 'template/kiali-service.yaml.j2', dest: '{{ cpath }}/kiali-service.yaml' }
75 | - { src: 'template/jaeger-service.yaml.j2', dest: '{{ cpath }}/jaeger-service.yaml' }
76 | - { src: 'template/prometheus-service.yaml.j2', dest: '{{ cpath }}/prometheus-service.yaml' }
77 | - { src: 'template/grafana-service.yaml.j2', dest: '{{ cpath }}/grafana-service.yaml' }
78 | run_once: true
79 |
80 | - name: istio deploy
81 | shell: ./deploy.sh
82 | args:
83 | chdir: '{{ cpath }}/'
84 | run_once: true
85 |
86 | - name: copy clean-images-tags.sh
87 | copy:
88 | src: '{{ item.src }}'
89 | dest: '{{ item.dest }}'
90 | mode: 0755
91 | with_items:
92 | - { src: 'file/clean-images-tags.sh', dest: '{{ path }}' }
93 | run_once: true
94 |
95 | - name: clean image tags
96 | shell: ./clean-images-tags.sh
97 | args:
98 | chdir: '{{ cpath }}/'
99 | run_once: true
100 |
--------------------------------------------------------------------------------
/istio-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "self",
4 | "label": "Istio installation node",
5 | "description": "please select a kubernetes master node for installation",
6 | "type": "host",
7 | "default": "",
8 | "required": true
9 | },
10 | {
11 | "variable": "KialiNodePort",
12 | "label": "NodePort for Kiali in istio-system",
13 | "description": "NodePort for Kiali service, e.g.: 30201",
14 | "type": "int",
15 | "default": "30201",
16 | "required": true
17 | },
18 | {
19 | "variable": "JaegerNodePort",
20 | "label": "NodePort for Jaeger in istio-system",
21 | "description": "NodePort for Jaeger service, e.g.: 30280",
22 | "type": "int",
23 | "default": "30280",
24 | "required": true
25 | },
26 | {
27 | "variable": "PrometheusNodePort",
28 | "label": "NodePort for Prometheus in istio-system",
29 | "description": "NodePort for Prometheus service, e.g.: 30290",
30 | "type": "int",
31 | "default": "30290",
32 | "required": true
33 | },
34 | {
35 | "variable": "GrafanaNodePort",
36 | "label": "NodePort for Grafana in istio-system",
37 | "description": "NodePort for Grafana service, e.g.: 30292",
38 | "type": "int",
39 | "default": "30292",
40 | "required": true
41 | }
42 | ]
43 |
--------------------------------------------------------------------------------
/istio-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean istio
2 | hosts: all
3 | user: root
4 | tasks:
5 | - name: stop & rm istio service
6 | shell: ./remove.sh
7 | args:
8 | chdir: '{{ cpath }}'
9 | ignore_errors: true
10 |
--------------------------------------------------------------------------------
/istio-playbook/version/template/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/istio-playbook/version/template/README.md
--------------------------------------------------------------------------------
/istio-playbook/version/template/grafana-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: grafana
6 | chart: grafana
7 | heritage: Tiller
8 | release: istio
9 | name: grafana
10 | namespace: istio-system
11 | spec:
12 | externalTrafficPolicy: Cluster
13 | ports:
14 | - name: http
15 | nodePort: {{ grafana_nodeport }}
16 | port: 3000
17 | protocol: TCP
18 | targetPort: 3000
19 | selector:
20 | app: grafana
21 | sessionAffinity: None
22 | type: NodePort
23 |
--------------------------------------------------------------------------------
/istio-playbook/version/template/jaeger-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: jaeger
6 | chart: tracing
7 | heritage: Tiller
8 | release: istio
9 | name: tracing
10 | namespace: istio-system
11 | spec:
12 | externalTrafficPolicy: Cluster
13 | ports:
14 | - name: tracing-http-query
15 | nodePort: {{ jaeger_nodeport }}
16 | port: 80
17 | protocol: TCP
18 | targetPort: 16686
19 | selector:
20 | app: jaeger
21 | sessionAffinity: None
22 | type: NodePort
23 |
--------------------------------------------------------------------------------
/istio-playbook/version/template/kiali-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: kiali
6 | chart: kiali
7 | heritage: Tiller
8 | release: istio
9 | name: kiali
10 | namespace: istio-system
11 | spec:
12 | externalTrafficPolicy: Cluster
13 | ports:
14 | - name: http-kiali
15 | nodePort: {{ kiali_nodeport }}
16 | port: 20001
17 | protocol: TCP
18 | targetPort: 20001
19 | selector:
20 | app: kiali
21 | sessionAffinity: None
22 | type: NodePort
23 |
--------------------------------------------------------------------------------
/istio-playbook/version/template/prometheus-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | prometheus.io/scrape: "true"
6 | labels:
7 | app: prometheus
8 | chart: prometheus
9 | heritage: Tiller
10 | release: istio
11 | name: prometheus
12 | namespace: istio-system
13 | spec:
14 | externalTrafficPolicy: Cluster
15 | ports:
16 | - name: http-prometheus
17 | nodePort: {{ prometheus_nodeport }}
18 | port: 9090
19 | protocol: TCP
20 | targetPort: 9090
21 | selector:
22 | app: prometheus
23 | sessionAffinity: None
24 | type: NodePort
25 |
--------------------------------------------------------------------------------
/istio-playbook/version/yat/all.yml.gotmpl:
--------------------------------------------------------------------------------
1 | registry_endpoint: {{ .harbor.Inherent.endpoint }}
2 | registry_user: {{ .harbor.Inherent.user }}
3 | registry_password: {{ .harbor.Inherent.password }}
4 | registry_project: library
5 | etcd_endpoint: {{ .etcd.Inherent.endpoint }}
6 | kiali_nodeport: {{ .istio.Property.KialiNodePort }}
7 | jaeger_nodeport: {{ .istio.Property.JaegerNodePort }}
8 | prometheus_nodeport: {{ .istio.Property.PrometheusNodePort }}
9 | grafana_nodeport: {{ .istio.Property.GrafanaNodePort }}
10 | kubernetes_endpoint: {{ .kubernetes.Inherent.endpoint }}
11 | kubernetes_version: {{ .kubernetes.Inherent.version }}
12 |
--------------------------------------------------------------------------------
/istio-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [istio]
2 | {{ range $v := .istio.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
5 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/kubernetes-playbook/version/copy-upgrade-scripts.ansible:
--------------------------------------------------------------------------------
1 | - name: copy upgrade scripts for Redhat/CentOS
2 | template:
3 | src: '{{ item.src }}'
4 | dest: '{{ item.dest }}'
5 | mode: 0755
6 | with_items:
7 | - { src: 'template/upgrade/redhat-centos/upgrade_first_master_node.sh.j2', dest: '{{ path }}/upgrade_first_master_node.sh' }
8 | - { src: 'template/upgrade/redhat-centos/upgrade_other_master_nodes.sh.j2', dest: '{{ path }}/upgrade_other_master_nodes.sh' }
9 | - { src: 'template/upgrade/redhat-centos/upgrade_worker_nodes.sh.j2', dest: '{{ path }}/upgrade_worker_nodes.sh' }
10 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS") or (ansible_distribution == "Rocky") or (ansible_distribution == "AlmaLinux") or (ansible_distribution == "OracleLinux") or (ansible_distribution == "Anolis")
11 |
12 | - name: copy upgrade scripts for Ubuntu
13 | template:
14 | src: '{{ item.src }}'
15 | dest: '{{ item.dest }}'
16 | mode: 0755
17 | with_items:
18 | - { src: 'template/upgrade/ubuntu/upgrade_first_master_node.sh.j2', dest: '{{ path }}/upgrade_first_master_node.sh' }
19 | - { src: 'template/upgrade/ubuntu/upgrade_other_master_nodes.sh.j2', dest: '{{ path }}/upgrade_other_master_nodes.sh' }
20 | - { src: 'template/upgrade/ubuntu/upgrade_worker_nodes.sh.j2', dest: '{{ path }}/upgrade_worker_nodes.sh' }
21 | when: ansible_distribution == "Ubuntu"
22 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/admin-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes-admin",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Guangdong",
11 | "L": "Shenzhen",
12 | "O": "system:masters",
13 | "OU": "Wise2C"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/apiserver-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kube-apiserver",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "Guangdong",
11 | "L": "Shenzhen",
12 | "O": "Kubernetes",
13 | "OU": "Wise2C"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "438000h"
5 | },
6 | "profiles": {
7 | "kubernetes": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth",
12 | "client auth"
13 | ],
14 | "expiry": "438000h"
15 | }
16 | }
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "ca": {
4 | "expiry": "876000h"
5 | },
6 | "key": {
7 | "algo": "rsa",
8 | "size": 2048
9 | },
10 | "names": [
11 | {
12 | "C": "CN",
13 | "ST": "Guangdong",
14 | "L": "Shenzhen",
15 | "O": "Kubernetes",
16 | "OU": "Wise2C"
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/controller-manager-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-controller-manager",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/front-proxy-ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "front-proxy-ca",
3 | "ca": {
4 | "expiry": "876000h"
5 | },
6 | "key": {
7 | "algo": "rsa",
8 | "size": 2048
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/front-proxy-client-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "front-proxy-client",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/generate-apiserver-certificate.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Check if there are no api server cert files under /etc/kubernetes/pki
3 |
4 | set -e
5 |
6 | if [ -f "/etc/kubernetes/pki/apiserver.crt" ] || [ -f "/etc/kubernetes/pki/apiserver.key" ] ; then
7 | echo "/etc/kubernetes/pki/apiserver.crt or /etc/kubernetes/pki/apiserver.key already exists!"
8 | echo "Please execute command kubeadm reset -f if you want to reinstall the cluster."
9 | exit 1
10 | fi
11 |
12 | set +e
13 |
14 | # Get host IP address and hostname
15 | WISE2C_IP_LABEL=$(cat /etc/hosts |grep -A 1 'BEGIN WISE2C DEPLOY MANAGED BLOCK' |grep -v '#' |grep -v '^\-\-' |wc |awk '{print $1}')
16 |
17 | if [ "${WISE2C_IP_LABEL}" = "0" ]; then
18 | HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
19 | HOST_NAME=$(hostname)
20 | else
21 | for IP_Addresses in $(cat /etc/hosts |grep -A 1 'BEGIN WISE2C DEPLOY MANAGED BLOCK' |grep -v '#' |grep -v '^\-\-' |awk '{print $1}');
22 | do
23 | GrepStr=$(ip a |grep "inet $IP_Addresses")
24 | if [ -n "$GrepStr" ]; then
25 | HOST_IP=$IP_Addresses
26 | HOST_NAME=$(cat /etc/hosts |grep -A 1 'BEGIN WISE2C DEPLOY MANAGED BLOCK' |grep -v '#' |grep -v '^\-\-' |grep -w $HOST_IP |awk '{print $2}')
27 | fi
28 | done;
29 | fi
30 |
31 | HOST_VIP=`cat /var/lib/wise2c/tmp/kubernetes/kubeadm.conf | grep -A 1 SAN | tail -1 | awk '{print $2}'`
32 |
33 | set -e
34 |
35 | # Get apiserver address
36 | service_cidr=`cat /var/lib/wise2c/tmp/kubernetes/service_cidr.txt`
37 | apiserver_ip=`ipcalc ${service_cidr} |grep HostMin |awk '{print $2}'`
38 |
39 | if [ -z $apiserver_ip ]
40 | then
41 | echo "apiserver_ip is null"
42 | exit 1
43 | fi
44 |
45 | # K8S apiserver certificate
46 | cd /var/lib/wise2c/tmp/kubernetes
47 | cfssl gencert -ca=/etc/kubernetes/pki/ca.crt -ca-key=/etc/kubernetes/pki/ca.key -config=ca-config.json -hostname=127.0.0.1,$apiserver_ip,$HOST_IP,$HOST_VIP,$HOST_NAME,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster.local -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
48 |
49 | cd /var/lib/wise2c/tmp/kubernetes/
50 | mv apiserver.pem /etc/kubernetes/pki/apiserver.crt
51 | mv apiserver-key.pem /etc/kubernetes/pki/apiserver.key
52 |
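The HostMin lookup resolves the first usable address of the service CIDR, which is the in-cluster ClusterIP of the apiserver and must therefore appear in the certificate SANs. With the default serviceSubnet from kubeadm.conf.j2:

    $ ipcalc 10.96.0.0/12 | grep HostMin
    HostMin:   10.96.0.1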
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/generate-other-certificates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | # Check if there are no cert files under /etc/kubernetes/pki
4 | if [ "`ls -A /etc/kubernetes/pki/`" != "" ]; then
5 | exit 1
6 | fi
7 |
8 | # K8S CA
9 |
10 | cd /var/lib/wise2c/tmp/kubernetes/
11 |
12 | cfssl gencert -initca ca-csr.json | cfssljson -bare /var/lib/wise2c/tmp/kubernetes/ca
13 |
14 | # K8S apiserver-kubelet-client certificate
15 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubelet-csr.json | cfssljson -bare apiserver-kubelet-client
16 |
17 | # K8S front-proxy certificate
18 | cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
19 | cfssl gencert -ca=front-proxy-ca.pem -ca-key=front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client
20 |
21 | # K8S Service Account Key
22 | openssl genrsa -out sa.key 2048
23 | openssl rsa -in sa.key -pubout -out sa.pub
24 |
25 | # Generate the admin certificate
26 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
27 |
28 | # Generate the controller-manager certificate
29 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes controller-manager-csr.json | cfssljson -bare controller-manager
30 |
31 | # Generate the scheduler certificate
32 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes scheduler-csr.json | cfssljson -bare scheduler
33 |
34 | cd /var/lib/wise2c/tmp/kubernetes/
35 | mv ca.pem /etc/kubernetes/pki/ca.crt
36 | mv ca-key.pem /etc/kubernetes/pki/ca.key
37 | mv apiserver-kubelet-client.pem /etc/kubernetes/pki/apiserver-kubelet-client.crt
38 | mv apiserver-kubelet-client-key.pem /etc/kubernetes/pki/apiserver-kubelet-client.key
39 | mv front-proxy-ca.pem /etc/kubernetes/pki/front-proxy-ca.crt
40 | mv front-proxy-ca-key.pem /etc/kubernetes/pki/front-proxy-ca.key
41 | mv front-proxy-client.pem /etc/kubernetes/pki/front-proxy-client.crt
42 | mv front-proxy-client-key.pem /etc/kubernetes/pki/front-proxy-client.key
43 | mv sa.pub /etc/kubernetes/pki/sa.pub
44 | mv sa.key /etc/kubernetes/pki/sa.key
45 |
46 | mv *.pem /etc/kubernetes/pki/
47 |
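Each `cfssljson -bare <name>` invocation writes `<name>.pem`, `<name>-key.pem`, and `<name>.csr` into the working directory, which is why the trailing mv commands can rename the outputs to the paths kubeadm expects. For example:

    $ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
          -profile=kubernetes admin-csr.json | cfssljson -bare admin
    $ ls admin*
    admin.csr  admin-key.pem  admin.pem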
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/ipcalc-0.41.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/kubernetes-playbook/version/file/ipcalc-0.41.tar.gz
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/ipcalc-url.txt:
--------------------------------------------------------------------------------
1 | http://jodies.de/ipcalc-archive/
2 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/kube-proxy-ds-patch.yaml:
--------------------------------------------------------------------------------
1 | spec:
2 | template:
3 | spec:
4 | affinity:
5 | nodeAffinity:
6 | requiredDuringSchedulingIgnoredDuringExecution:
7 | nodeSelectorTerms:
8 | - matchExpressions:
9 | - key: node-role.kubernetes.io/control-plane
10 | operator: DoesNotExist
11 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/kubeadm-generate-join-command.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | kubeadm token create --print-join-command > /var/lib/wise2c/tmp/kubernetes/worker-join-command.sh
3 | chmod +x /var/lib/wise2c/tmp/kubernetes/worker-join-command.sh
4 |
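`kubeadm token create --print-join-command` emits a ready-to-run join line, so worker nodes can simply execute the saved script. The generated file looks like this (endpoint, token, and hash are illustrative):

    kubeadm join 192.0.2.10:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:<hex-digest>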
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/kubeadm-token.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | kubeadm_token=`kubeadm token generate`
3 | sed -i "s/wise2c-breeze-token/${kubeadm_token}/g" ./kubeadm.conf
4 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/kubelet-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kube-apiserver-kubelet-client",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "L": "Guangdong",
11 | "ST": "Shenzhen",
12 | "O": "system:masters",
13 | "OU": "Wise2C"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/metrics-server/aggregated-metrics-reader.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: system:aggregated-metrics-reader
5 | labels:
6 | rbac.authorization.k8s.io/aggregate-to-view: "true"
7 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
8 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
9 | rules:
10 | - apiGroups: ["metrics.k8s.io"]
11 | resources: ["pods"]
12 | verbs: ["get", "list", "watch"]
13 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/metrics-server/auth-delegator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: metrics-server:system:auth-delegator
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: system:auth-delegator
10 | subjects:
11 | - kind: ServiceAccount
12 | name: metrics-server
13 | namespace: kube-system
14 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/metrics-server/auth-reader.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: RoleBinding
4 | metadata:
5 | name: metrics-server-auth-reader
6 | namespace: kube-system
7 | roleRef:
8 | apiGroup: rbac.authorization.k8s.io
9 | kind: Role
10 | name: extension-apiserver-authentication-reader
11 | subjects:
12 | - kind: ServiceAccount
13 | name: metrics-server
14 | namespace: kube-system
15 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/metrics-server/metrics-apiservice.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiregistration.k8s.io/v1
3 | kind: APIService
4 | metadata:
5 | name: v1beta1.metrics.k8s.io
6 | spec:
7 | service:
8 | name: metrics-server
9 | namespace: kube-system
10 | group: metrics.k8s.io
11 | version: v1beta1
12 | insecureSkipTLSVerify: true
13 | groupPriorityMinimum: 100
14 | versionPriority: 100
15 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/metrics-server/metrics-server-service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: metrics-server
6 | namespace: kube-system
7 | labels:
8 | kubernetes.io/name: "Metrics-server"
9 | kubernetes.io/cluster-service: "true"
10 | spec:
11 | selector:
12 | k8s-app: metrics-server
13 | ports:
14 | - port: 443
15 | protocol: TCP
16 | targetPort: 443
17 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/metrics-server/resource-reader.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: system:metrics-server
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - pods
11 | - nodes
12 | - nodes/stats
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | ---
18 | apiVersion: rbac.authorization.k8s.io/v1
19 | kind: ClusterRoleBinding
20 | metadata:
21 | name: system:metrics-server
22 | roleRef:
23 | apiGroup: rbac.authorization.k8s.io
24 | kind: ClusterRole
25 | name: system:metrics-server
26 | subjects:
27 | - kind: ServiceAccount
28 | name: metrics-server
29 | namespace: kube-system
30 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/patch-kubeconfig.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /etc/kubernetes/
3 |
4 | kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=admin.conf
5 |
6 | kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/pki/controller-manager.pem --client-key=/etc/kubernetes/pki/controller-manager-key.pem --embed-certs=true --kubeconfig=controller-manager.conf
7 |
8 | kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/kubernetes/pki/scheduler.pem --client-key=/etc/kubernetes/pki/scheduler-key.pem --embed-certs=true --kubeconfig=scheduler.conf
9 |
10 | #restart controller-manager and scheduler
11 | #podman ps|grep kube-controller-manager|awk '{print $1}'|xargs podman stop
12 | #podman ps|grep kube-scheduler|awk '{print $1}'|xargs podman stop
13 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/patch-kubelet-conf.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | TIME_STRING=`date "+%Y-%m-%d-%H-%M-%S"`
3 | cd /etc/kubernetes/
4 | cp -p /etc/kubernetes/kubelet.conf /etc/kubernetes/kubelet.conf.$TIME_STRING
5 | sed -i 's#client-certificate-data:.*$#client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem#g' kubelet.conf
6 | sed -i 's#client-key-data:.*$#client-key: /var/lib/kubelet/pki/kubelet-client-current.pem#g' kubelet.conf
7 | systemctl restart kubelet
8 |
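The two sed expressions swap the embedded (and therefore rotation-blind) credentials for a path that the kubelet keeps pointing at its newest client certificate. Schematically:

    # before: client-certificate-data: LS0tLS1CRUdJTi...  (base64 blob, goes stale on rotation)
    # after:  client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem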
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/prometheus-fix-master-nodes-ubuntu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | KUBEADM_SYSTEMD_CONF=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
5 | #KUBEADM_SYSTEMD_CONF=/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
6 | sed -e "/cadvisor-port=0/d" -i "$KUBEADM_SYSTEMD_CONF"
7 | if ! grep -q "authentication-token-webhook=true" "$KUBEADM_SYSTEMD_CONF"; then
8 | sed -e "s/--authorization-mode=Webhook/--authentication-token-webhook=true --authorization-mode=Webhook/" -i "$KUBEADM_SYSTEMD_CONF"
9 | fi
10 |
11 | # Only for kubeadm setup
12 | sed -e "s/- --address=127.0.0.1/- --address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-controller-manager.yaml
13 | sed -e "s/- --address=127.0.0.1/- --address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-scheduler.yaml
14 |
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/prometheus-fix-master-nodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | #KUBEADM_SYSTEMD_CONF=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
5 | KUBEADM_SYSTEMD_CONF=/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
6 | sed -e "/cadvisor-port=0/d" -i "$KUBEADM_SYSTEMD_CONF"
7 | if ! grep -q "authentication-token-webhook=true" "$KUBEADM_SYSTEMD_CONF"; then
8 | sed -e "s/--authorization-mode=Webhook/--authentication-token-webhook=true --authorization-mode=Webhook/" -i "$KUBEADM_SYSTEMD_CONF"
9 | fi
10 |
11 | # Only for kubeadm setup
12 | sed -e "s/- --address=127.0.0.1/- --address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-controller-manager.yaml
13 | sed -e "s/- --address=127.0.0.1/- --address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-scheduler.yaml
14 |
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/prometheus-fix-worker-nodes-ubuntu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | KUBEADM_SYSTEMD_CONF=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
5 | #KUBEADM_SYSTEMD_CONF=/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
6 | sed -e "/cadvisor-port=0/d" -i "$KUBEADM_SYSTEMD_CONF"
7 | if ! grep -q "authentication-token-webhook=true" "$KUBEADM_SYSTEMD_CONF"; then
8 | sed -e "s/--authorization-mode=Webhook/--authentication-token-webhook=true --authorization-mode=Webhook/" -i "$KUBEADM_SYSTEMD_CONF"
9 | fi
10 |
11 | systemctl daemon-reload
12 | systemctl restart kubelet
13 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/prometheus-fix-worker-nodes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | #KUBEADM_SYSTEMD_CONF=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
5 | KUBEADM_SYSTEMD_CONF=/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
6 | sed -e "/cadvisor-port=0/d" -i "$KUBEADM_SYSTEMD_CONF"
7 | if ! grep -q "authentication-token-webhook=true" "$KUBEADM_SYSTEMD_CONF"; then
8 | sed -e "s/--authorization-mode=Webhook/--authentication-token-webhook=true --authorization-mode=Webhook/" -i "$KUBEADM_SYSTEMD_CONF"
9 | fi
10 |
11 | systemctl daemon-reload
12 | systemctl restart kubelet
13 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/scheduler-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-scheduler",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/file/update-api-advertise-address.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set +e
3 |
4 | # Get host IP address
5 | WISE2C_IP_LABEL=$(cat /etc/hosts |grep -A 1 'BEGIN WISE2C DEPLOY MANAGED BLOCK' |grep -v '#' |grep -v '^\-\-' |wc |awk '{print $1}')
6 |
7 | if [ "${WISE2C_IP_LABEL}" = "0" ]; then
8 | HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
9 | else
10 | for IP_Addresses in $(cat /etc/hosts |grep -A 1 'BEGIN WISE2C DEPLOY MANAGED BLOCK' |grep -v '#' |grep -v '^\-\-' |awk '{print $1}');
11 | do
12 | GrepStr=$(ip a |grep -w "inet $IP_Addresses")
13 | if [ -n "$GrepStr" ]; then
14 | HOST_IP=$IP_Addresses
15 | fi
16 | done;
17 | fi
18 |
19 | sed -i "s/advertiseAddress: 127.0.0.1/advertiseAddress: ${HOST_IP}/g" kubeadm.conf
20 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/kubernetes-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/kubernetes-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 | endpoint: {{ if eq (print .Property.endpoint) "" -}}
2 | {{ (index .Hosts.master 0).IP }}:6443
3 | {{- else -}}
4 | {{ .Property.endpoint }}
5 | {{- end }}
6 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/push-images.ansible:
--------------------------------------------------------------------------------
1 | - name: copy k8s images
2 | copy:
3 | src: '{{ item.src }}'
4 | dest: '{{ item.dest }}'
5 | with_items:
6 | - { src: 'file/k8s.tar.bz2', dest: '{{ path }}' }
7 | - { src: 'file/flannel.tar.bz2', dest: '{{ path }}' }
8 | - { src: 'file/dashboard.tar.bz2', dest: '{{ path }}' }
9 | - { src: 'file/metrics-server.tar.bz2', dest: '{{ path }}' }
10 | run_once: true
11 |
12 | - name: load k8s images
13 | shell: |
14 | podman load -i '{{ path }}/{{ item }}'
15 | with_items:
16 | - k8s.tar.bz2
17 | - flannel.tar.bz2
18 | - dashboard.tar.bz2
19 | - metrics-server.tar.bz2
20 | async: 600
21 | poll: 5
22 | run_once: true
23 |
24 | - name: podman login
25 | containers.podman.podman_login:
26 | registry: '{{ registry_endpoint }}'
27 | username: '{{ registry_user }}'
28 | password: '{{ registry_password }}'
29 | reauthorize: true
30 | run_once: true
31 |
32 | - name: tag images
33 | shell: |
34 | podman tag {{ item.repo }}/{{ item.name }}:{{ item.tag }} {{ registry_endpoint }}/{{ registry_project }}/{{ item.name }}:{{ item.tag }}
35 | with_items:
36 | - { repo: '{{ kubernetes_repo }}', name: 'kube-controller-manager', tag: '{{ kubernetes_version }}' }
37 | - { repo: '{{ kubernetes_repo }}', name: 'kube-apiserver', tag: '{{ kubernetes_version }}' }
38 | - { repo: '{{ kubernetes_repo }}', name: 'kube-scheduler', tag: '{{ kubernetes_version }}' }
39 | - { repo: '{{ kubernetes_repo }}', name: 'kube-proxy', tag: '{{ kubernetes_version }}' }
40 | - { repo: '{{ kubernetes_repo }}', name: 'pause', tag: '{{ pause_version }}' }
41 | - { repo: '{{ kubernetes_repo }}', name: 'coredns', tag: '{{ dns_version }}' }
42 | - { repo: '{{ metrics_server_repo }}/metrics-server', name: 'metrics-server', tag: '{{ metrics_server_version }}' }
43 | - { repo: '{{ flannel_repo }}', name: 'flannel', tag: '{{ flannel_version }}' }
44 | - { repo: '{{ dashboard_repo }}', name: 'dashboard', tag: '{{ dashboard_version }}' }
45 | run_once: true
46 |
47 | - name: push images
48 | containers.podman.podman_image:
49 | name: '{{ registry_endpoint }}/{{ registry_project }}/{{ item.name }}'
50 | tag: '{{ item.tag }}'
51 | push: true
52 | state: present
53 | with_items:
54 | - { repo: '{{ kubernetes_repo }}', name: 'kube-controller-manager', tag: '{{ kubernetes_version }}' }
55 | - { repo: '{{ kubernetes_repo }}', name: 'kube-apiserver', tag: '{{ kubernetes_version }}' }
56 | - { repo: '{{ kubernetes_repo }}', name: 'kube-scheduler', tag: '{{ kubernetes_version }}' }
57 | - { repo: '{{ kubernetes_repo }}', name: 'kube-proxy', tag: '{{ kubernetes_version }}' }
58 | - { repo: '{{ kubernetes_repo }}', name: 'pause', tag: '{{ pause_version }}' }
59 | - { repo: '{{ kubernetes_repo }}', name: 'coredns', tag: '{{ dns_version }}' }
60 | - { repo: '{{ metrics_server_repo }}/metrics-server', name: 'metrics-server', tag: '{{ metrics_server_version }}' }
61 | - { repo: '{{ flannel_repo }}', name: 'flannel', tag: '{{ flannel_version }}' }
62 | - { repo: '{{ dashboard_repo }}', name: 'dashboard', tag: '{{ dashboard_version }}' }
63 | run_once: true
64 |
65 | - name: Remove registry.k8s.io images tag
66 | containers.podman.podman_image:
67 | state: absent
68 | name: '{{ item.repo }}/{{ item.name }}'
69 | tag: '{{ item.tag }}'
70 | with_items:
71 | - { repo: '{{ kubernetes_repo }}', name: 'kube-controller-manager', tag: '{{ kubernetes_version }}' }
72 | - { repo: '{{ kubernetes_repo }}', name: 'kube-apiserver', tag: '{{ kubernetes_version }}' }
73 | - { repo: '{{ kubernetes_repo }}', name: 'kube-scheduler', tag: '{{ kubernetes_version }}' }
74 | - { repo: '{{ kubernetes_repo }}', name: 'kube-proxy', tag: '{{ kubernetes_version }}' }
75 | - { repo: '{{ kubernetes_repo }}', name: 'pause', tag: '{{ pause_version }}' }
76 | - { repo: '{{ kubernetes_repo }}', name: 'coredns', tag: '{{ dns_version }}' }
77 | - { repo: '{{ metrics_server_repo }}/metrics-server', name: 'metrics-server', tag: '{{ metrics_server_version }}' }
78 | - { repo: '{{ flannel_repo }}', name: 'flannel', tag: '{{ flannel_version }}' }
79 | - { repo: '{{ dashboard_repo }}', name: 'dashboard', tag: '{{ dashboard_version }}' }
80 | run_once: true
81 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: reset kubernetes cluster
2 | hosts: all
3 | user: root
4 | tasks:
5 | - name: kubeadm reset
6 | shell: |
7 | kubeadm reset -f
8 |
9 | - name: iptables reset
10 | shell: |
11 | firewall-cmd --complete-reload
12 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS") or (ansible_distribution == "Rocky") or (ansible_distribution == "AlmaLinux") or (ansible_distribution == "OracleLinux") or (ansible_distribution == "Anolis")
13 |
14 | - name: ipvs reset
15 | shell: |
16 | ipvsadm --clear
17 |
18 | - name: remove kubernetes components for Redhat/CentOS
19 | yum:
20 | state: absent
21 | disablerepo: '*'
22 | enablerepo: wise2c-k8s
23 | name: '{{ item }}'
24 | with_items:
25 | - kubernetes-cni
26 | - kubectl-{{ kubernetes_version[1:] }}
27 | - kubelet-{{ kubernetes_version[1:] }}
28 | - kubeadm-{{ kubernetes_version[1:] }}
29 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS") or (ansible_distribution == "Rocky") or (ansible_distribution == "AlmaLinux") or (ansible_distribution == "OracleLinux") or (ansible_distribution == "Anolis")
30 |
31 | - name: remove kubernetes components for Ubuntu
32 | apt:
33 | state: absent
34 | name: '{{ item }}'
35 | with_items:
36 | - kubernetes-cni
37 | - kubectl
38 | - kubelet
39 | - kubeadm
40 | when: ansible_distribution == "Ubuntu"
41 |
42 | - name: clean flannel link
43 | shell: |
44 | ip link delete cni0
45 | ip link delete flannel.1
46 | ignore_errors: true
47 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/scripts/check_kubelet_kubeproxy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | kubelet_code_stats=`curl -sLk -o /dev/null -w %{response_code} https://127.0.0.1:10250/stats`
3 | kubelet_code_errortest=`curl -sLk -o /dev/null -w %{response_code} https://127.0.0.1:10250/errortest`
4 | kubeproxy_code_healthz=`curl -sLk -o /dev/null -w %{response_code} http://127.0.0.1:10256/healthz`
5 | kubeproxy_code_errortest=`curl -sLk -o /dev/null -w %{response_code} http://127.0.0.1:10256/errortest`
6 |
7 | if ( [ "$kubelet_code_stats" == "200" ] || [ "$kubelet_code_stats" == "401" ] ) && [ "$kubelet_code_errortest" == "404" ]; then
8 | kubelet_health=true
9 | else
10 | kubelet_health=false
11 | fi
12 |
13 | if ( [ "$kubeproxy_code_healthz" == "200" ] || [ "$kubeproxy_code_healthz" == "503" ] ) && [ "$kubeproxy_code_errortest" == "404" ]; then
14 | kubeproxy_health=true
15 | else
16 | kubeproxy_health=false
17 | fi
18 |
19 | if [ "${kubelet_health}" == true ] && [ "${kubeproxy_health}" == true ]; then
20 | printf true
21 | else
22 | printf false
23 | fi
24 |
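The probe accepts 200 or 401 from the kubelet and 200 or 503 from kube-proxy as proof that the daemon is listening, while the deliberate request to the bogus /errortest path must return 404; that guards against a catch-all proxy answering 200 for everything. Manual spot check:

    $ curl -sLk -o /dev/null -w '%{response_code}' http://127.0.0.1:10256/healthz
    200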
--------------------------------------------------------------------------------
/kubernetes-playbook/version/scripts/disable-swap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | swapoff -a
3 | sysctl -w vm.swappiness=0
4 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/scripts/taint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | HOSTNAME=`hostname`
3 | while : ; do
4 | kubectl taint --overwrite nodes ${HOSTNAME,,} node-role.kubernetes.io=master:NoSchedule
5 | if [ $? != 0 ]; then
6 | sleep 2
7 | else break
8 | fi
9 | done
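`${HOSTNAME,,}` is bash's lowercase expansion; Kubernetes node names are registered in lowercase, so a mixed-case hostname must be folded before it is passed to kubectl. For example:

    HOSTNAME=K8S-Master-1
    echo "${HOSTNAME,,}"    # -> k8s-master-1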
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/kubeadm.conf.j2:
--------------------------------------------------------------------------------
1 | apiVersion: kubeadm.k8s.io/v1beta3
2 | bootstrapTokens:
3 | - groups:
4 | - system:bootstrappers:kubeadm:default-node-token
5 | token: wise2c-breeze-token
6 | ttl: 0s
7 | usages:
8 | - signing
9 | - authentication
10 | kind: InitConfiguration
11 | localAPIEndpoint:
12 | advertiseAddress: 127.0.0.1
13 | bindPort: 6443
14 | nodeRegistration:
15 | criSocket: "unix:///var/run/crio/crio.sock"
16 | taints:
17 | - effect: NoSchedule
18 | key: node-role.kubernetes.io/master
19 | ---
20 | apiServer:
21 | certSANs:
22 | - {{ endpoint.split(':')[0] }}
23 | timeoutForControlPlane: 4m0s
24 | apiVersion: kubeadm.k8s.io/v1beta3
25 | certificatesDir: /etc/kubernetes/pki
26 | clusterName: kubernetes
27 | controlPlaneEndpoint: {{ endpoint }}
28 | controllerManager: {}
29 | dns: {}
30 | etcd:
31 | external:
32 | caFile: /etc/etcd/pki/ca.pem
33 | certFile: /etc/etcd/pki/etcd.pem
34 | endpoints:
35 | {% for host in groups['etcd'] %}
36 | - https://{{ host }}:2379
37 | {% endfor %}
38 | keyFile: /etc/etcd/pki/etcd-key.pem
39 | imageRepository: {{ registry_endpoint }}/{{ registry_project }}
40 | kind: ClusterConfiguration
41 | kubernetesVersion: {{ kubernetes_version }}
42 | networking:
43 | dnsDomain: cluster.local
44 | podSubnet: 10.244.0.0/16
45 | serviceSubnet: 10.96.0.0/12
46 | scheduler: {}
47 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/kubelet.conf.j2:
--------------------------------------------------------------------------------
1 | KUBELET_EXTRA_ARGS=--pod-infra-container-image={{ registry_endpoint }}/{{ registry_project }}/pause:3.10 \
2 | --cgroup-driver=systemd \
3 | --container-runtime-endpoint=unix:///var/run/crio/crio.sock \
4 | --runtime-request-timeout=10m \
5 | --alsologtostderr=true \
6 | --logtostderr=false \
7 | --anonymous-auth=true \
8 | --container-log-max-files=10 \
9 | --container-log-max-size=50Mi \
10 | --log-dir=/var/log/pods
11 |
12 | #KUBELET_EXTRA_ARGS=--pod-infra-container-image={{ registry_endpoint }}/{{ registry_project }}/pause:3.10 \
13 | # --cgroup-driver=systemd \
14 | # --container-runtime-endpoint=unix:///var/run/crio/crio.sock \
15 | # --runtime-request-timeout=10m \
16 | # --alsologtostderr=true \
17 | # --logtostderr=false \
18 | # --anonymous-auth=true \
19 | # --container-log-max-files=10 \
20 | # --container-log-max-size=50Mi \
21 | # --log-dir=/var/log/pods
22 | # --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
23 | # --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
24 | # --eviction-hard=memory.available<500Mi,nodefs.available<10%
25 | # --max-pods=200
26 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/kubernetes-dashboard-svc.yml.j2:
--------------------------------------------------------------------------------
1 | # ------------------- Dashboard Service ------------------- #
2 |
3 | kind: Service
4 | apiVersion: v1
5 | metadata:
6 | labels:
7 | k8s-app: kubernetes-dashboard
8 | name: kubernetes-dashboard
9 | namespace: kubernetes-dashboard
10 | spec:
11 | type: NodePort
12 | ports:
13 | - port: 8443
14 | nodePort: 30300
15 | selector:
16 | k8s-app: kubernetes-dashboard
17 |
18 | ---
19 | # ------------------- Dashboard Service Account------------------- #
20 | apiVersion: v1
21 | kind: ServiceAccount
22 | metadata:
23 | name: admin-user
24 | namespace: kube-system
25 |
26 | ---
27 | # ------------------- Dashboard Cluster Role Binding------------------- #
28 | apiVersion: rbac.authorization.k8s.io/v1
29 | kind: ClusterRoleBinding
30 | metadata:
31 | name: admin-user
32 | roleRef:
33 | apiGroup: rbac.authorization.k8s.io
34 | kind: ClusterRole
35 | name: cluster-admin
36 | subjects:
37 | - kind: ServiceAccount
38 | name: admin-user
39 | namespace: kube-system
40 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/max-scale.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: max-scale
6 | name: max-scale
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: max-scale
12 | template:
13 | metadata:
14 | labels:
15 | app: max-scale
16 | name: max-scale
17 | spec:
18 | containers:
19 | - env:
20 | - name: MYSQL_ROOT_PASSWORD
21 | value: password
22 | - name: REPLICATION_USER
23 | value: root
24 | - name: REPLICATION_PASSWORD
25 | value: password
26 | - name: MasterIP
27 | value: {{ master }}
28 | - name: Slave1IP
29 | value: {{ slave1 }}
30 | - name: Slave2IP
31 | value: {{ slave2 }}
32 | - name: MySQLPort
33 | value: "3307"
34 | - name: NeverMasterIP
35 | value: {{ neverMaster }}
36 | image: {{ registry }}/{{ project }}/maxscale1.4.5-1-docker:latest
37 | imagePullPolicy: IfNotPresent
38 | name: max-scale
39 | ---
40 | kind: Service
41 | apiVersion: v1
42 | metadata:
43 | labels:
44 | app: max-scale
45 | name: max-scale
46 | spec:
47 | type: NodePort
48 | ports:
49 | - name: mysql
50 | port: 3306
51 | nodePort: 30308
52 | targetPort: 3306
53 | protocol: TCP
54 | selector:
55 | app: max-scale
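Once applied, MaxScale is reachable through the NodePort declared above. A quick connectivity check from any machine with a MySQL client; the node IP here is hypothetical, the credentials are the ones set in the Deployment env:

    mysql -h 192.168.0.10 -P 30308 -u root -ppassword -e 'SELECT 1'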
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/service_cidr.txt.j2:
--------------------------------------------------------------------------------
1 | {{ service_cidr }}
2 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/upgrade/redhat-centos/upgrade_first_master_node.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | long_k8s_version={{ kubernetes_version }}
3 | short_k8s_version=${long_k8s_version#*v}
4 | current_k8s_server_version=`kubectl version |grep "Server Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
5 | current_k8s_client_version=`kubectl version |grep "Client Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
6 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
7 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
8 | if (version_lt ${current_k8s_server_version} ${long_k8s_version}) || (version_lt ${current_k8s_client_version} ${long_k8s_version}); then
9 | yum clean all
10 | yum install -y --disablerepo=* --enablerepo=wise2c-k8s kubeadm-${short_k8s_version}-0 --disableexcludes=kubernetes
11 | kubectl drain `hostname` --ignore-daemonsets
12 | kubeadm upgrade apply --certificate-renewal=false -y ${long_k8s_version}
13 | yum install -y --disablerepo=* --enablerepo=wise2c-k8s kubelet-${short_k8s_version}-0 kubectl-${short_k8s_version}-0 --disableexcludes=kubernetes
14 | systemctl daemon-reload
15 | systemctl restart kubelet
16 | kubectl uncordon `hostname`
17 | echo 'first master node' > ./first_master_node.log
18 | else
19 | echo "current version is not lower than ${long_k8s_version}"
20 | fi
21 |
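The version_lt helper used by these upgrade scripts decides "lower than" by asking sort -V for the highest version and checking that it is not the first argument. A standalone sketch with hypothetical versions:

    version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
    version_lt v1.29.1 v1.30.2 && echo "v1.29.1 is lower"    # prints: v1.29.1 is lower
    version_lt v1.30.2 v1.30.2 || echo "equal is not lower"  # prints: equal is not lower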
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/upgrade/redhat-centos/upgrade_other_master_nodes.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [ ! -f "./first_master_node.log" ];then
3 | long_k8s_version={{ kubernetes_version }}
4 | short_k8s_version=${long_k8s_version#*v}
5 | current_k8s_server_version=`kubectl version |grep "Server Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
6 | current_k8s_client_version=`kubectl version |grep "Client Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
7 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
8 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
9 | if (version_lt ${current_k8s_server_version} ${long_k8s_version}) || (version_lt ${current_k8s_client_version} ${long_k8s_version}); then
10 | yum clean all
11 | yum install -y --disablerepo=* --enablerepo=wise2c-k8s kubeadm-${short_k8s_version}-0 --disableexcludes=kubernetes
12 | kubectl drain `hostname` --ignore-daemonsets
13 | kubeadm upgrade node --certificate-renewal=false
14 | yum install -y --disablerepo=* --enablerepo=wise2c-k8s kubelet-${short_k8s_version}-0 kubectl-${short_k8s_version}-0 --disableexcludes=kubernetes
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 | kubectl uncordon `hostname`
18 | else
19 | echo "current version is not lower than ${long_k8s_version}"
20 | fi
21 | else
22 | echo 'This is the first master node.'
23 | fi
24 | rm -f ./first_master_node.log
25 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/upgrade/redhat-centos/upgrade_worker_nodes.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | long_k8s_version={{ kubernetes_version }}
3 | short_k8s_version=${long_k8s_version#*v}
4 | current_k8s_server_version=`kubectl version |grep "Server Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
5 | current_k8s_client_version=`kubectl version |grep "Client Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
6 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
7 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
8 | if (version_lt ${current_k8s_server_version} ${long_k8s_version}) || (version_lt ${current_k8s_client_version} ${long_k8s_version}); then
9 | yum clean all
10 | yum install -y --disablerepo=* --enablerepo=wise2c-k8s kubeadm-${short_k8s_version}-0 --disableexcludes=kubernetes
11 | kubectl drain `hostname` --ignore-daemonsets
12 | #kubeadm upgrade node config --kubelet-version ${long_k8s_version} --certificate-renewal=false
13 | kubeadm upgrade node --certificate-renewal=false
14 | yum install -y --disablerepo=* --enablerepo=wise2c-k8s kubelet-${short_k8s_version}-0 kubectl-${short_k8s_version}-0 --disableexcludes=kubernetes
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 | kubectl uncordon `hostname`
18 | rm -f $HOME/.kube/config
19 | else
20 | echo "current version is not lower than ${long_k8s_version}"
21 | fi
22 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/upgrade/ubuntu/upgrade_first_master_node.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | long_k8s_version={{ kubernetes_version }}
3 | short_k8s_version=${long_k8s_version#*v}
4 | current_k8s_server_version=`kubectl version |grep "Server Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
5 | current_k8s_client_version=`kubectl version |grep "Client Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
6 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
7 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
8 | if (version_lt ${current_k8s_server_version} ${long_k8s_version}) || (version_lt ${current_k8s_client_version} ${long_k8s_version}); then
9 | apt-get update
10 | apt-get install -y kubeadm
11 | kubectl drain `hostname` --ignore-daemonsets
12 | kubeadm upgrade apply --certificate-renewal=false -y ${long_k8s_version}
13 | apt-get install -y kubelet kubectl
14 | systemctl daemon-reload
15 | systemctl restart kubelet
16 | kubectl uncordon `hostname`
17 | echo 'first master node' > ./first_master_node.log
18 | else
19 | echo "current version is not lower than ${long_k8s_version}"
20 | fi
21 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/upgrade/ubuntu/upgrade_other_master_nodes.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [ ! -f "./first_master_node.log" ];then
3 | long_k8s_version={{ kubernetes_version }}
4 | short_k8s_version=${long_k8s_version#*v}
5 | current_k8s_server_version=`kubectl version |grep "Server Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
6 | current_k8s_client_version=`kubectl version |grep "Client Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
7 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
8 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
9 | if (version_lt ${current_k8s_server_version} ${long_k8s_version}) || (version_lt ${current_k8s_client_version} ${long_k8s_version}); then
10 | apt-get update
11 | apt-get install -y kubeadm
12 | kubectl drain `hostname` --ignore-daemonsets
13 | kubeadm upgrade node --certificate-renewal=false
14 | apt-get install -y kubelet kubectl
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 | kubectl uncordon `hostname`
18 | else
19 | echo "current version is not lower than ${long_k8s_version}"
20 | fi
21 | else
22 | echo 'This is the first master node.'
23 | fi
24 | rm -f ./first_master_node.log
25 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/template/upgrade/ubuntu/upgrade_worker_nodes.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | long_k8s_version={{ kubernetes_version }}
3 | short_k8s_version=${long_k8s_version#*v}
4 | current_k8s_server_version=`kubectl version |grep "Server Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
5 | current_k8s_client_version=`kubectl version |grep "Client Version" |awk '{print $5}' |awk -F':' '{print $2}' |awk -F'"' '{print $2}'`
6 | function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
7 | function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -V -r | head -n 1)" != "$1"; }
8 | if (version_lt ${current_k8s_server_version} ${long_k8s_version}) || (version_lt ${current_k8s_client_version} ${long_k8s_version}); then
9 | apt-get update
10 | apt-get install -y kubeadm
11 | kubectl drain `hostname` --ignore-daemonsets
12 | #kubeadm upgrade node config --kubelet-version ${long_k8s_version} --certificate-renewal=false
13 | kubeadm upgrade node --certificate-renewal=false
14 | apt-get install -y kubelet kubectl
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 | kubectl uncordon `hostname`
18 | rm -f $HOME/.kube/config
19 | else
20 | echo "current version is not lower than ${long_k8s_version}"
21 | fi
22 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/worker-node.ansible:
--------------------------------------------------------------------------------
1 | - name: init setup on worker nodes
2 | include_tasks: both.ansible
3 |
4 | - name: copy worker-join-command.sh
5 | copy:
6 | src: '{{ item.src }}'
7 | dest: '{{ item.dest }}'
8 | mode: 0755
9 | with_items:
10 | - { src: 'file/worker-join-command.sh', dest: '{{ path }}/worker-join-command.sh' }
11 |
12 | - name: update worker-join-command.sh
13 | shell: |
14 | sed -i "s/127.0.0.1:6443/{{ endpoint }}/g" {{ path }}/worker-join-command.sh
15 |
16 | - name: setup node
17 | shell: /var/lib/wise2c/tmp/kubernetes/worker-join-command.sh
18 | async: 600
19 |
20 | - name: update kubelet.conf
21 | shell: |
22 | sed -i "s/.*server:.*/ server: https:\/\/{{ endpoint }}/g" /etc/kubernetes/kubelet.conf
23 |
24 | - name: restart kubelet
25 | systemd:
26 | name: kubelet
27 | state: restarted
28 |
29 | - name: execute prometheus-fix-worker-nodes script for Redhat/CentOS
30 | shell: ./prometheus-fix-worker-nodes.sh
31 | args:
32 | chdir: '/var/lib/wise2c/tmp/kubernetes/'
33 | when: (ansible_distribution == "RedHat") or (ansible_distribution == "CentOS") or (ansible_distribution == "Rocky") or (ansible_distribution == "AlmaLinux") or (ansible_distribution == "OracleLinux") or (ansible_distribution == "Anolis")
34 |
35 | - name: execute prometheus-fix-worker-nodes script for Ubuntu
36 | shell: ./prometheus-fix-worker-nodes-ubuntu.sh
37 | args:
38 | chdir: '/var/lib/wise2c/tmp/kubernetes/'
39 | when: ansible_distribution == "Ubuntu"
40 |
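Both sed rewrites in this play splice the load-balanced endpoint into text generated for 127.0.0.1:6443. Their effect, shown with a hypothetical endpoint and join line:

    endpoint="192.168.0.100:6443"   # hypothetical VIP:port
    echo "kubeadm join 127.0.0.1:6443 --token abc.123" | sed "s/127.0.0.1:6443/${endpoint}/g"
    # -> kubeadm join 192.168.0.100:6443 --token abc.123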
--------------------------------------------------------------------------------
/kubernetes-playbook/version/yat/all.yml.gotmpl:
--------------------------------------------------------------------------------
1 | endpoint: {{ .kubernetes.Inherent.endpoint }}
2 | registry_endpoint: {{ .harbor.Inherent.endpoint }}
3 | registry_https: {{ .harbor.Inherent.https }}
4 | registry_user: {{ .harbor.Inherent.user }}
5 | registry_password: {{ .harbor.Inherent.password }}
6 | registry_project: library
7 | add_worker_node_only: {{ .kubernetes.Property.AddWorkerNodesOnly }}
8 | upgrade_cluster: {{ .kubernetes.Property.UpgradeCluster }}
9 | auto_upgrade_nodes: {{ .kubernetes.Property.AutoUpgradeK8sNodes }}
10 | cni: {{ .kubernetes.Property.cni }}
11 | calico_nodes: {{ .kubernetes.Property.calico_nodes }}
12 | pod_cidr: {{ .kubernetes.Property.pod_cidr }}
13 | service_cidr: {{ .kubernetes.Property.service_cidr }}
14 | cluster_cidr: {{ .kubernetes.Property.cluster_cidr }}
15 | calico_typha_replicas: {{ .kubernetes.Property.calico_typha_replicas }}
16 | calico_mode: {{ .kubernetes.Property.calico_mode }}
17 | calico_node_ip_detection_mode: {{ .kubernetes.Property.calico_node_ip_detection_mode }}
18 |
--------------------------------------------------------------------------------
/kubernetes-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [etcd]
2 | {{ range $v := .etcd.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
5 |
6 | [master]
7 | {{ range $v := .kubernetes.Hosts.master -}}
8 | {{ $v.IP }} role=master
9 | {{ end }}
10 |
11 | [node]
12 | {{ range $v := .kubernetes.Hosts.worker -}}
13 | {{ $v.IP }} role=worker
14 | {{ end }}
15 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/loadbalancer-playbook/version/file/README.md
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/file/keepalived.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Make sure we react to these signals by running stop() when we see them - for clean shutdown
4 | # And then exiting
5 | trap "stop; exit 0;" SIGTERM SIGINT
6 |
7 | stop()
8 | {
9 | # We're here because we've seen SIGTERM, likely via a Docker stop command or similar
10 | # Let's shutdown cleanly
11 | echo "SIGTERM caught, terminating keepalived process..."
12 | # Record PIDs
13 | pid=$(pidof keepalived)
14 | # Kill them
15 | kill -TERM $pid > /dev/null 2>&1
16 | # Wait till they have been killed
17 | wait $pid
18 | echo "Terminated."
19 | exit 0
20 | }
21 |
22 | # This loop runs until we've started up successfully
23 | while true; do
24 |
25 | # Check if Keepalived is running by recording its PID (if it's not running, $pid will be null):
26 | pid=$(pidof keepalived)
27 |
28 | # If $pid is null, do this to start or restart Keepalived:
29 | while [ -z "$pid" ]; do
30 | #Obviously optional:
31 | #echo "Starting Confd population of files..."
32 | #/usr/bin/confd -onetime
33 | echo "Displaying resulting /etc/keepalived/keepalived.conf contents..."
34 | cat /etc/keepalived/keepalived.conf
35 | echo "Starting Keepalived in the background..."
36 | /usr/sbin/keepalived --dont-fork --dump-conf --log-console --log-detail --vrrp &
37 | # Check if Keepalived is now running by recording its PID (if it's not running, $pid will be null):
38 | pid=$(pidof keepalived)
39 |
40 | # If $pid is null, startup failed; log the fact and sleep for 2s
41 | # We'll then automatically loop through and try again
42 | if [ -z "$pid" ]; then
43 | echo "Startup of Keepalived failed, sleeping for 2s, then retrying..."
44 | sleep 2
45 | fi
46 |
47 | done
48 |
49 | # Break this outer loop once we've started up successfully
50 | # Otherwise, we'll silently restart and Rancher won't know
51 | break
52 |
53 | done
54 |
55 | # Wait until the Keepalived process stops (for whatever reason)
56 | wait $pid
57 | echo "The Keepalived process is no longer running, exiting..."
58 | # Exit with an error
59 | exit 1
60 |
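The script's shape (a trap for clean shutdown, a retry loop around startup, a final wait) is a generic supervise-one-daemon pattern. A minimal sketch with a stand-in daemon:

    #!/bin/bash
    trap 'kill -TERM "$pid"; wait "$pid"; exit 0' SIGTERM SIGINT
    sleep 1000 &    # stand-in for the real daemon
    pid=$!
    wait "$pid"     # returns only if the daemon dies on its own
    exit 1          # non-zero, so the container runtime treats it as a failure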
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/loadbalancer-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | set -e
3 | path=`dirname $0`
4 |
5 | haproxy_version=`cat ${path}/components-version.txt |grep "HAProxy" |awk '{print $3}'`
6 | keepalived_version=`cat ${path}/components-version.txt |grep "Keepalived" |awk '{print $3}'`
7 |
8 | echo "haproxy_version: ${haproxy_version}" > ${path}/inherent.yaml
9 | echo "keepalived_version: ${keepalived_version}" >> ${path}/inherent.yaml
10 |
11 | echo "build wise2c/k8s-keepalived:${keepalived_version} image"
12 | cd ${path}/keepalived
13 | #docker build -t wise2c/k8s-keepalived:${keepalived_version} .
14 | docker pull wise2c/k8s-keepalived:${keepalived_version}
15 | docker save wise2c/k8s-keepalived:${keepalived_version} -o ../file/keepalived-${keepalived_version}.tar
16 | bzip2 -z --best ../file/keepalived-${keepalived_version}.tar
17 |
18 | echo "build wise2c/k8s-haproxy:${haproxy_version} image"
19 | docker pull haproxy:${haproxy_version}
20 | docker tag haproxy:${haproxy_version} wise2c/k8s-haproxy:${haproxy_version}
21 | docker save wise2c/k8s-haproxy:${haproxy_version} -o ../file/haproxy-${haproxy_version}.tar
22 | bzip2 -z --best ../file/haproxy-${haproxy_version}.tar
23 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/keepalived/Dockerfile.aarch64:
--------------------------------------------------------------------------------
1 | FROM almalinux:8
2 | RUN yum install -y keepalived procps-ng util-linux nc
3 | COPY keepalived.sh /usr/bin/keepalived.sh
4 | COPY keepalived.conf /etc/keepalived/keepalived.conf
5 | RUN chmod +x /usr/bin/keepalived.sh
6 | ENTRYPOINT ["/usr/bin/keepalived.sh"]
7 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/keepalived/Dockerfile.amd64:
--------------------------------------------------------------------------------
1 | FROM almalinux:8
2 | RUN yum install -y keepalived procps-ng util-linux nc
3 | COPY keepalived.sh /usr/bin/keepalived.sh
4 | COPY keepalived.conf /etc/keepalived/keepalived.conf
5 | RUN chmod +x /usr/bin/keepalived.sh
6 | ENTRYPOINT ["/usr/bin/keepalived.sh"]
7 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/keepalived/keepalived.conf:
--------------------------------------------------------------------------------
1 | global_defs {
2 | router_id {{RID}}
3 | vrrp_version 2
4 | vrrp_garp_master_delay 1
5 | vrrp_mcast_group4 {{MCAST_GROUP}}
6 | script_user root
7 | enable_script_security
8 | }
9 |
10 | vrrp_script chk_haproxy {
11 | script "/usr/bin/curl -sLk -o /dev/null -w %{response_code} https://127.0.0.1:6444 |grep 403"
12 | timeout 3
13 | interval 1 # check every 1 second
14 | fall 3 # require 3 failures for KO
15 | rise 2 # require 2 successes for OK
16 | }
17 |
18 | vrrp_instance lb-vips {
19 | state BACKUP
20 | interface {{INTERFACE}}
21 | virtual_router_id {{VRID}}
22 | priority 100
23 | advert_int 1
24 | nopreempt
25 | track_script {
26 | chk_haproxy
27 | }
28 | authentication {
29 | auth_type PASS
30 | auth_pass blahblah
31 | }
32 | virtual_ipaddress {
33 | {{VIRTUAL_IP}}/{{NETMASK_BIT}} dev {{INTERFACE}}
34 | }
35 | }
36 |
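The chk_haproxy probe treats HTTP 403 as healthy: an anonymous request to the Kubernetes apiserver is rejected with 403, so getting one back through the local HAProxy frontend on 6444 shows the whole forwarding path is alive. The same probe, runnable by hand on a load-balancer node:

    curl -sLk -o /dev/null -w '%{response_code}' https://127.0.0.1:6444 | grep 403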
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/keepalived/keepalived.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Substitute variables in config file.
4 | /bin/sed -i "s/{{VIRTUAL_IP}}/${VIRTUAL_IP}/g" /etc/keepalived/keepalived.conf
5 | /bin/sed -i "s/{{CHECK_PORT}}/${CHECK_PORT}/g" /etc/keepalived/keepalived.conf
6 | /bin/sed -i "s/{{RID}}/${RID}/g" /etc/keepalived/keepalived.conf
7 | /bin/sed -i "s/{{VRID}}/${VRID}/g" /etc/keepalived/keepalived.conf
8 | /bin/sed -i "s/{{INTERFACE}}/${INTERFACE}/g" /etc/keepalived/keepalived.conf
9 | /bin/sed -i "s/{{NETMASK_BIT}}/${NETMASK_BIT}/g" /etc/keepalived/keepalived.conf
10 | /bin/sed -i "s/{{MCAST_GROUP}}/${MCAST_GROUP}/g" /etc/keepalived/keepalived.conf
11 |
12 | # Make sure we react to these signals by running stop() when we see them - for clean shutdown
13 | # And then exiting
14 | trap "stop; exit 0;" SIGTERM SIGINT
15 |
16 | stop()
17 | {
18 | # We're here because we've seen SIGTERM, likely via a Docker stop command or similar
19 | # Let's shutdown cleanly
20 | echo "SIGTERM caught, terminating keepalived process..."
21 | # Record PIDs
22 | pid=$(pidof keepalived)
23 | # Kill them
24 | kill -TERM $pid > /dev/null 2>&1
25 | # Wait till they have been killed
26 | wait $pid
27 | echo "Terminated."
28 | exit 0
29 | }
30 |
31 | # This loop runs until we've started up successfully
32 | while true; do
33 | # Check if Keepalived is running by recording its PID (if it's not running, $pid will be null):
34 | pid=$(pidof keepalived)
35 | # If $pid is null, do this to start or restart Keepalived:
36 | while [ -z "$pid" ]; do
37 | #Obviously optional:
38 | #echo "Starting Confd population of files..."
39 | #/usr/bin/confd -onetime
40 | echo "Displaying resulting /etc/keepalived/keepalived.conf contents..."
41 | cat /etc/keepalived/keepalived.conf
42 | echo "Starting Keepalived in the background..."
43 | /usr/sbin/keepalived --dont-fork --dump-conf --log-console --log-detail --vrrp &
44 | # Check if Keepalived is now running by recording its PID (if it's not running, $pid will be null):
45 | pid=$(pidof keepalived)
46 | # If $pid is null, startup failed; log the fact and sleep for 2s
47 | # We'll then automatically loop through and try again
48 | if [ -z "$pid" ]; then
49 | echo "Startup of Keepalived failed, sleeping for 2s, then retrying..."
50 | sleep 2
51 | fi
52 | done
53 | # Break this outer loop once we've started up successfully
54 | # Otherwise, we'll silently restart and Rancher won't know
55 | break
56 | done
57 | # Wait until the Keepalived process stops (for whatever reason)
58 | wait $pid
59 | echo "The Keepalived process is no longer running, exiting..."
60 | # Exit with an error
61 | exit 1
62 |
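The block of sed calls above turns {{PLACEHOLDER}} tokens into values taken from the container's environment. The same pattern in isolation, looped for brevity, with hypothetical values:

    conf=$(mktemp)
    echo '{{VIRTUAL_IP}}/{{NETMASK_BIT}} dev {{INTERFACE}}' > "$conf"
    VIRTUAL_IP=192.168.0.200; NETMASK_BIT=24; INTERFACE=eth0
    for var in VIRTUAL_IP NETMASK_BIT INTERFACE; do
        sed -i "s/{{${var}}}/${!var}/g" "$conf"   # ${!var} is bash indirect expansion
    done
    cat "$conf"   # -> 192.168.0.200/24 dev eth0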
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "variable": "self",
4 | "label": "haproxy hosts",
5 | "description": "hosts to be set up as haproxy/keepalived nodes",
6 | "type": "host",
7 | "default": "",
8 | "required": true
9 | },
10 | {
11 | "variable": "k8sVip",
12 | "label": "vip for k8s master",
13 | "description": "input a vip for k8s master",
14 | "type": "string",
15 | "default": "",
16 | "required": true
17 | },
18 | {
19 | "variable": "nic",
20 | "label": "NIC",
21 | "description": "Create virtual IP on this network interface card",
22 | "type": "string",
23 | "default": "",
24 | "required": true
25 | },
26 | {
27 | "variable": "netMask",
28 | "label": "Subnet Host Bits",
29 | "description": "Please input subnet host bits like 16 for 255.255.0.0 or 24 for 255.255.255.0",
30 | "type": "int",
31 | "default": "",
32 | "required": true
33 | },
34 | {
35 | "variable": "routerID",
36 | "label": "Keepalived router id",
37 | "description": "Keepalived router id",
38 | "type": "int",
39 | "default": "10",
40 | "required": false
41 | },
42 | {
43 | "variable": "vRouterID",
44 | "label": "Keepalived virtual route id",
45 | "description": "Keepalived virtual route id",
46 | "type": "int",
47 | "default": "160",
48 | "required": false
49 | }
50 | ]
51 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean lb
2 | hosts: hosts
3 | user: root
4 | tasks:
5 | - name: stop & rm old lb
6 | shell: |
7 | podman stop haproxy
8 | podman rm haproxy
9 | podman stop keepalived
10 | podman rm keepalived
11 | ignore_errors: true
12 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/template/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | global
2 | log 127.0.0.1 local0
3 | log 127.0.0.1 local1 notice
4 | maxconn 20000
5 | daemon
6 | spread-checks 2
7 |
8 | defaults
9 | mode http
10 | log global
11 | option tcplog
12 | option dontlognull
13 | option http-server-close
14 | option redispatch
15 | timeout http-request 2s
16 | timeout queue 3s
17 | timeout connect 1s
18 | timeout client 1h
19 | timeout server 1h
20 | timeout http-keep-alive 1h
21 | timeout check 2s
22 | maxconn 18000
23 |
24 | backend stats-back
25 | mode http
26 | balance roundrobin
27 | stats uri /haproxy/stats
28 | stats auth pxcstats:secret
29 |
30 | frontend stats-front
31 | bind *:8081
32 | mode http
33 | default_backend stats-back
34 |
35 | {% for s in servers | d([], true) | list %}
36 | frontend {{ s.name }}
37 | bind *:{{ s.port + 1 }}
38 | mode tcp
39 | tcp-request inspect-delay 5s
40 | #tcp-request content reject if !HTTP
41 | tcp-request content accept if { req.ssl_hello_type 1 }
42 | default_backend {{ s.name }}
43 |
44 | backend {{ s.name }}
45 | mode tcp
46 | option tcp-check
47 | balance roundrobin
48 | default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
49 | {% for ip in s.ips %}
50 | server {{ s.name }}{{ loop.index }} {{ ip }}:{{ s.port }} check
51 | {% endfor %}
52 |
53 | {% endfor %}
54 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/template/keepalive.cfg.j2:
--------------------------------------------------------------------------------
1 | global_defs {
2 | router_id {{ routerID }}
3 | vrrp_version 2
4 | vrrp_garp_master_delay 1
5 | vrrp_garp_master_refresh 1
6 | vrrp_mcast_group4 224.0.0.200
7 | script_user root
8 | enable_script_security
9 | }
10 |
11 | {% for v in vips -%}
12 | vrrp_script chk-{{ v.name }} {
13 | script "/usr/bin/curl -sLk -o /dev/null -w %{response_code} https://127.0.0.1:6444 |grep 403"
14 | timeout 3
15 | interval 5 # check every 5 seconds
16 | fall 3 # require 3 failures for KO
17 | rise 2 # require 2 successes for OK
18 | }
19 |
20 | vrrp_instance lb-vips-{{ v.name }} {
21 | state BACKUP
22 | interface {{ interface }}
23 | virtual_router_id {{ vRouterID + loop.index }}
24 | {% set vi = loop.index0 -%}
25 | {% for host in ansible_play_batch -%}
26 | {% if host == inventory_hostname -%}
27 | {% if loop.index0 + vi > loop.length -%}
28 | priority {{ loop.index + vi - loop.length }}
29 | {% else -%}
30 | priority {{ loop.index + vi }}
31 | {% endif -%}
32 | {% endif -%}
33 | {% endfor -%}
34 | advert_int 1
35 | nopreempt
36 | track_script {
37 | chk-{{ v.name }}
38 | }
39 | authentication {
40 | auth_type PASS
41 | auth_pass blahblah
42 | }
43 | virtual_ipaddress {
44 | {{ v.vip }}/{{ netmask }} dev {{ interface }}
45 | }
46 | }
47 |
48 | {% endfor %}
49 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/template/readme.txt:
--------------------------------------------------------------------------------
1 | Notes on the HAProxy timeout settings (haproxy.cfg):
2 | 
3 | defaults
4 | mode http # layer-7 HTTP proxying; layer-4 TCP proxying is also available
5 | log global
6 | option httplog # log HTTP requests, session information, etc.
7 | option dontlognull # do not log null (empty) connections
8 | option http-server-close # recommended when backends are dynamic applications; use http-keep-alive for static backends
9 | option forwardfor except 127.0.0.0/8 # HAProxy adds an "X-Forwarded-For" header to requests sent to the backends
10 | option redispatch # if a backend goes down and HAProxy cannot forward a cookie-bound request to it, redispatch the request to another backend
11 | timeout http-request 10s # maximum time to wait for the client to send a complete request; keep it short (e.g. 2-3s) to mitigate flood attacks,
12 | # since HAProxy only processes and forwards a request or response once it has been received in full
13 | timeout queue 1m # maximum time a request may wait in the queue; 1 minute is far too long, and even 10s is pushing it, since a client kept waiting that long loses patience
14 | timeout connect 10s # maximum time to establish a connection to a server; 1s is plenty, as connections within a LAN are usually instantaneous
15 | timeout client 1m # idle timeout on the client side; under high concurrency this can be shorter, e.g. 10s, to release connections sooner
16 | timeout server 1m # idle timeout on the server side; LAN connections are fast, so keep it short, especially under concurrency, e.g. 1-3s
17 | timeout http-keep-alive 10s # maximum time to hold a keep-alive connection with the client; takes precedence over timeout http-request, which takes precedence over timeout client
18 | timeout check 10s # time allowed for a health check to complete once the connection to the backend is established (excludes connect time; only the time to read the check result),
19 | # can be set short, e.g. 1-2s
20 | maxconn 3000 # default maximum number of frontend connections; must not exceed the hard maxconn limit in the global section
21 | 
22 | The recommended configuration after tuning is as follows:
23 |
24 | defaults
25 | mode http
26 | log global
27 | option tcplog
28 | option dontlognull
29 | option http-server-close
30 | option redispatch
31 | timeout http-request 2s
32 | timeout queue 3s
33 | timeout connect 1s
34 | timeout client 1h
35 | timeout server 1h
36 | timeout http-keep-alive 1h
37 | timeout check 2s
38 | maxconn 18000
39 |
40 | The 1-hour timeouts here are so that a kubectl session attached to a container shell (for example, after running kubectl exec -it centos-xxxx bash) is not kicked out immediately while no command is being typed.
41 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/yat/all.yml.gotmpl:
--------------------------------------------------------------------------------
1 | haproxy_version: {{ .loadbalancer.Inherent.haproxy_version }}
2 | keepalived_version: {{ .loadbalancer.Inherent.keepalived_version }}
3 | registry_endpoint: {{ .harbor.Inherent.endpoint }}
4 | registry_https: {{ .harbor.Inherent.https }}
5 | registry_user: {{ .harbor.Inherent.user }}
6 | registry_password: {{ .harbor.Inherent.password }}
7 | registry_project: library
8 |
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [hosts]
2 | {{ range $v := .loadbalancer.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
--------------------------------------------------------------------------------
/loadbalancer-playbook/version/yat/hosts.yml.gotmpl:
--------------------------------------------------------------------------------
1 | servers:
2 | {{ if .kubernetes.Hosts -}}
3 | - name: k8s
4 | port: 6443
5 | target: 6444
6 | ips:
7 | {{ range $v := .kubernetes.Hosts.master }} - {{ $v.IP }}
8 | {{ end }}
9 | {{- end }}
10 |
11 | interface: {{ .loadbalancer.Property.nic }}
12 | netmask: {{ .loadbalancer.Property.netMask }}
13 | routerID: {{ .loadbalancer.Property.routerID }}
14 | vRouterID: {{ .loadbalancer.Property.vRouterID }}
15 | vips:
16 | - name: k8s
17 | vip: {{ .loadbalancer.Property.k8sVip }}
18 | chkport: 6444
19 |
--------------------------------------------------------------------------------
/manual/Add-Istio-001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Add-Istio-001.png
--------------------------------------------------------------------------------
/manual/Add-Istio-002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Add-Istio-002.png
--------------------------------------------------------------------------------
/manual/AddWorkerNodes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/AddWorkerNodes.png
--------------------------------------------------------------------------------
/manual/Alertmanager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Alertmanager.png
--------------------------------------------------------------------------------
/manual/Breeze-Logo.ai:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Breeze-Logo.ai
--------------------------------------------------------------------------------
/manual/Breeze-Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Breeze-Logo.png
--------------------------------------------------------------------------------
/manual/BreezeCNCF.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeCNCF.png
--------------------------------------------------------------------------------
/manual/BreezeLogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeLogo.png
--------------------------------------------------------------------------------
/manual/BreezeManual-CN.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeManual-CN.pdf
--------------------------------------------------------------------------------
/manual/BreezeManual.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeManual.pdf
--------------------------------------------------------------------------------
/manual/BreezeScreenShots001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots001.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots002.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots003.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots004.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots005.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots005.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots006.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots006.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots007.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots007.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots008.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots009.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots009.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots010.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots010.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots011.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots011.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots012.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots012.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots013.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots013.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots014.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots015.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots015.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots016.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots016.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots017.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots017.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots018.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots018.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots019.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots019.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots020.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots021.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots021.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots022.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots022.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots023.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots023.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots024.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots024.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots025.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots025.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots026.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots026.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots027.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots027.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots028.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots028.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots029.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots029.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots030.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots031.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots031.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShots032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShots032.png
--------------------------------------------------------------------------------
/manual/BreezeScreenShotsExternalHarbor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeScreenShotsExternalHarbor.png
--------------------------------------------------------------------------------
/manual/BreezeVersionSelect.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/BreezeVersionSelect.png
--------------------------------------------------------------------------------
/manual/ClusterCheck.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/ClusterCheck.png
--------------------------------------------------------------------------------
/manual/Grafana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Grafana.png
--------------------------------------------------------------------------------
/manual/Install-Istio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Install-Istio.png
--------------------------------------------------------------------------------
/manual/Istio-Grafana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Istio-Grafana.png
--------------------------------------------------------------------------------
/manual/Istio-Jaeger.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Istio-Jaeger.png
--------------------------------------------------------------------------------
/manual/Istio-Kiali-001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Istio-Kiali-001.png
--------------------------------------------------------------------------------
/manual/Istio-Kiali-002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Istio-Kiali-002.png
--------------------------------------------------------------------------------
/manual/Istio-Prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Istio-Prometheus.png
--------------------------------------------------------------------------------
/manual/Kubernetes-HA-Breeze.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Kubernetes-HA-Breeze.png
--------------------------------------------------------------------------------
/manual/KubernetesDashboard-001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/KubernetesDashboard-001.png
--------------------------------------------------------------------------------
/manual/KubernetesDashboard-002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/KubernetesDashboard-002.png
--------------------------------------------------------------------------------
/manual/KubernetesDashboard-003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/KubernetesDashboard-003.png
--------------------------------------------------------------------------------
/manual/KubernetesHA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/KubernetesHA.png
--------------------------------------------------------------------------------
/manual/Prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Prometheus.png
--------------------------------------------------------------------------------
/manual/README.md:
--------------------------------------------------------------------------------
1 | Resource files for the software manual
2 |
--------------------------------------------------------------------------------
/manual/SelectDockerWorkerNodes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/SelectDockerWorkerNodes.png
--------------------------------------------------------------------------------
/manual/SelectTag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/SelectTag.png
--------------------------------------------------------------------------------
/manual/Wise2C-Breeze-Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/Wise2C-Breeze-Architecture.png
--------------------------------------------------------------------------------
/manual/With-Istio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/With-Istio.png
--------------------------------------------------------------------------------
/manual/haproxy-keepalived-001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/haproxy-keepalived-001.png
--------------------------------------------------------------------------------
/manual/haproxy-keepalived-002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/haproxy-keepalived-002.png
--------------------------------------------------------------------------------
/manual/prometheus-role.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/manual/prometheus-role.png
--------------------------------------------------------------------------------
/prometheus-playbook/version/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=hosts
3 | callback_plugins = ../../callback_plugins
4 | callback_whitelist = log_back
5 | retry_files_enabled = false
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/prometheus-playbook/version/file/README.md
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/add-on.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | KubePrometheusVersion=`cat components-version.txt |grep "KubePrometheus" |awk '{print $3}'`
3 |
4 | kubectl apply -f kube-controller-manager.yaml
5 | kubectl apply -f kube-scheduler.yaml
6 | kubectl apply -f coredns.yaml
7 |
8 | etcd1_address=`cat etcd-address.txt |awk -F "," '{print $1}' |awk -F "//" '{print $2}' |awk -F ":" '{print $1}'`
9 | etcd2_address=`cat etcd-address.txt |awk -F "," '{print $2}' |awk -F "//" '{print $2}' |awk -F ":" '{print $1}'`
10 | etcd3_address=`cat etcd-address.txt |awk -F "," '{print $3}' |awk -F "//" '{print $2}' |awk -F ":" '{print $1}'`
11 |
12 | sed -i "s/etcd_1_address/${etcd1_address}/g" /var/lib/wise2c/tmp/prometheus/etcd.yaml
13 | sed -i "s/etcd_2_address/${etcd2_address}/g" /var/lib/wise2c/tmp/prometheus/etcd.yaml
14 | sed -i "s/etcd_3_address/${etcd3_address}/g" /var/lib/wise2c/tmp/prometheus/etcd.yaml
15 |
16 | kubectl -n monitoring create secret generic etcd-certs --from-file=/etc/etcd/pki/ca.pem --from-file=/etc/etcd/pki/etcd.pem --from-file=/etc/etcd/pki/etcd-key.pem
17 |
18 | cat >> kube-prometheus-$KubePrometheusVersion/manifests/prometheus-prometheus.yaml << EOF
19 | secrets:
20 | - etcd-certs
21 | EOF
22 |
23 | kubectl -n monitoring apply -f kube-prometheus-$KubePrometheusVersion/manifests/prometheus-prometheus.yaml
24 |
25 | kubectl -n monitoring apply -f etcd.yaml
26 |
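The three awk stages above peel one URL out of the comma-separated etcd endpoint list and reduce it to a bare IP. The same pipeline with a hypothetical etcd-address.txt value:

    echo "https://10.0.0.1:2379,https://10.0.0.2:2379,https://10.0.0.3:2379" \
      | awk -F "," '{print $1}' | awk -F "//" '{print $2}' | awk -F ":" '{print $1}'
    # -> 10.0.0.1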
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/append-lines.txt:
--------------------------------------------------------------------------------
1 | - --alertmanager-default-base-image=ImageRepositoryIP/library/alertmanager
2 | - --prometheus-default-base-image=ImageRepositoryIP/library/prometheus
3 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/clean-images-tags.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /var/lib/wise2c/tmp/prometheus
3 | for file in $(cat images-list.txt); do podman rmi $file ; done
4 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/coredns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | namespace: kube-system
5 | name: coredns-prometheus
6 | labels:
7 | k8s-app: coredns-prometheus
8 | spec:
9 | selector:
10 | k8s-app: kube-dns
11 | clusterIP: None
12 | ports:
13 | - name: http-metrics
14 | port: 9153
15 | ---
16 | apiVersion: monitoring.coreos.com/v1
17 | kind: ServiceMonitor
18 | metadata:
19 | labels:
20 | k8s-app: coredns
21 | name: coredns
22 | namespace: monitoring
23 | spec:
24 | endpoints:
25 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
26 | interval: 15s
27 | port: http-metrics
28 | jobLabel: k8s-app
29 | namespaceSelector:
30 | matchNames:
31 | - kube-system
32 | selector:
33 | matchLabels:
34 | k8s-app: coredns-prometheus
35 |
36 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/etcd-address.txt:
--------------------------------------------------------------------------------
1 | etcd-address
2 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/etcd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: etcd-k8s
5 |   labels:
6 |     k8s-app: etcd
7 | spec:
8 |   type: ClusterIP
9 |   clusterIP: None
10 |   ports:
11 |   - name: api
12 |     port: 2379
13 |     protocol: TCP
14 | ---
15 | apiVersion: v1
16 | kind: Endpoints
17 | metadata:
18 |   name: etcd-k8s
19 |   labels:
20 |     k8s-app: etcd
21 | subsets:
22 | - addresses:
23 |   - ip: etcd_1_address
24 |     nodeName: etcd1
25 |   - ip: etcd_2_address
26 |     nodeName: etcd2
27 |   - ip: etcd_3_address
28 |     nodeName: etcd3
29 |   ports:
30 |   - name: api
31 |     port: 2379
32 |     protocol: TCP
33 | ---
34 | apiVersion: monitoring.coreos.com/v1
35 | kind: ServiceMonitor
36 | metadata:
37 |   name: etcd-k8s
38 |   labels:
39 |     k8s-app: etcd-k8s
40 | spec:
41 |   jobLabel: k8s-app
42 |   endpoints:
43 |   - port: api
44 |     interval: 30s
45 |     scheme: https
46 |     tlsConfig:
47 |       caFile: /etc/prometheus/secrets/etcd-certs/ca.pem
48 |       certFile: /etc/prometheus/secrets/etcd-certs/etcd.pem
49 |       keyFile: /etc/prometheus/secrets/etcd-certs/etcd-key.pem
50 |       # use insecureSkipVerify only if you cannot use a Subject Alternative Name
51 |       insecureSkipVerify: true
52 |       # serverName: ETCD
53 |   selector:
54 |     matchLabels:
55 |       k8s-app: etcd
56 |   namespaceSelector:
57 |     matchNames:
58 |     - monitoring
59 |
--------------------------------------------------------------------------------
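Since etcd runs outside the cluster, the manifest pairs a selector-less Service with a hand-maintained Endpoints object, and the ServiceMonitor scrapes it over TLS using the certificates mounted from the etcd-certs secret. The same scrape can be reproduced by hand from a node that holds the etcd client certificates (ETCD_IP is a placeholder for one of the substituted etcd addresses):

    # Should return Prometheus-format metrics if the certs and address are right
    curl --cacert /etc/etcd/pki/ca.pem \
         --cert   /etc/etcd/pki/etcd.pem \
         --key    /etc/etcd/pki/etcd-key.pem \
         "https://ETCD_IP:2379/metrics" | head

--------------------------------------------------------------------------------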
/prometheus-playbook/version/file/harbor-address.txt:
--------------------------------------------------------------------------------
1 | harbor-address
2 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/kube-controller-manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   namespace: kube-system
5 |   name: kube-controller-manager-prometheus-discovery
6 |   labels:
7 |     k8s-app: kube-controller-manager
8 | spec:
9 |   selector:
10 |     component: kube-controller-manager
11 |   type: ClusterIP
12 |   clusterIP: None
13 |   ports:
14 |   - name: http-metrics
15 |     port: 10252
16 |     targetPort: 10252
17 |     protocol: TCP
18 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/file/kube-scheduler.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   namespace: kube-system
5 |   name: kube-scheduler-prometheus-discovery
6 |   labels:
7 |     k8s-app: kube-scheduler
8 | spec:
9 |   selector:
10 |     component: kube-scheduler
11 |   type: ClusterIP
12 |   clusterIP: None
13 |   ports:
14 |   - name: http-metrics
15 |     port: 10251
16 |     targetPort: 10251
17 |     protocol: TCP
18 |
--------------------------------------------------------------------------------
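Both discovery Services follow the same pattern: a headless Service whose selector matches the static-pod labels, exposing the legacy HTTP metrics ports (10252 for the controller manager, 10251 for the scheduler; newer Kubernetes releases serve metrics over HTTPS on 10257/10259 instead). To verify the endpoints were picked up and the ports answer (run the curls on a master node):

    kubectl -n kube-system get endpoints \
      kube-controller-manager-prometheus-discovery \
      kube-scheduler-prometheus-discovery
    curl -s http://127.0.0.1:10252/metrics | head   # controller manager
    curl -s http://127.0.0.1:10251/metrics | head   # scheduler

--------------------------------------------------------------------------------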
/prometheus-playbook/version/file/node-ports.txt:
--------------------------------------------------------------------------------
1 | prometheus-svc-port: prometheus-port
2 | alertmanager-svc-port: alertmanager-port
3 | grafana-svc-port: grafana-port
4 |
--------------------------------------------------------------------------------
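The three *-port tokens are placeholders that the playbook substitutes with the NodePort values collected in properties.json before the file is consumed. A minimal sketch of that substitution (the variable names here are illustrative, not the playbook's own):

    #!/bin/bash
    # Hypothetical values; in the playbook these come from properties.json
    prometheus_port=30900; alertmanager_port=30903; grafana_port=30902
    sed -i -e "s/prometheus-port/${prometheus_port}/" \
           -e "s/alertmanager-port/${alertmanager_port}/" \
           -e "s/grafana-port/${grafana_port}/" node-ports.txt

--------------------------------------------------------------------------------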
/prometheus-playbook/version/file/remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd /var/lib/wise2c/tmp/prometheus
4 | version_path=`grep "KubePrometheus Version" components-version.txt | awk '{print $3}'`
5 |
6 | kubectl delete --ignore-not-found=true -f /var/lib/wise2c/tmp/prometheus/kube-prometheus-${version_path}/manifests/
7 | kubectl delete --ignore-not-found=true -f /var/lib/wise2c/tmp/prometheus/kube-prometheus-${version_path}/manifests/setup
8 |
--------------------------------------------------------------------------------
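Deleting manifests/ before manifests/setup removes the monitoring workloads first and the CRDs plus namespace last, so nothing is stranded behind a deleted CustomResourceDefinition. To confirm the teardown completed:

    kubectl get ns monitoring                                 # expect NotFound
    kubectl api-resources --api-group=monitoring.coreos.com   # expect no resources

--------------------------------------------------------------------------------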
/prometheus-playbook/version/group_vars/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/prometheus-playbook/version/group_vars/README.md
--------------------------------------------------------------------------------
/prometheus-playbook/version/group_vars/prometheus.yml:
--------------------------------------------------------------------------------
1 | cpath: /var/lib/wise2c/tmp/prometheus
2 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/inherent.yaml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | path=`dirname $0`
6 |
7 | KubePrometheusVersion=`cat ${path}/components-version.txt |grep "KubePrometheus" |awk '{print $3}'`
8 | PrometheusOperatorVersion=`cat ${path}/components-version.txt |grep "PrometheusOperator" |awk '{print $3}'`
9 |
10 | echo "" >> ${path}/group_vars/prometheus.yml.gotmpl
11 | echo "kube_prometheus_version: ${KubePrometheusVersion}" >> ${path}/group_vars/prometheus.yml
12 | echo "operator_version: ${PrometheusOperatorVersion}" >> ${path}/group_vars/prometheus.yml
13 |
14 | curl -L -o ${path}/file/kube-prometheus-v$KubePrometheusVersion-origin.tar.gz https://github.com/coreos/kube-prometheus/archive/v$KubePrometheusVersion.tar.gz
15 |
16 | cd ${path}/file/
17 | tar zxf kube-prometheus-v$KubePrometheusVersion-origin.tar.gz
18 |
19 | grep -hr "quay.io/coreos" kube-prometheus-$KubePrometheusVersion/manifests/ > image-lists-temp.txt
20 | grep -hr "quay.io/brancz" kube-prometheus-$KubePrometheusVersion/manifests/ >> image-lists-temp.txt
21 | grep -hr "grafana/grafana:" kube-prometheus-$KubePrometheusVersion/manifests/ >> image-lists-temp.txt
22 | grep -hr "quay.io/prometheus" kube-prometheus-$KubePrometheusVersion/manifests/ >> image-lists-temp.txt
23 | grep -hr "registry.k8s.io/" kube-prometheus-$KubePrometheusVersion/manifests/ >> image-lists-temp.txt
24 | grep -hr "jimmidyson/" kube-prometheus-$KubePrometheusVersion/manifests/ >> image-lists-temp.txt
25 | grep -hr "directxman12/" kube-prometheus-$KubePrometheusVersion/manifests/ >> image-lists-temp.txt
26 |
27 | prometheus_base_image=`cat kube-prometheus-$KubePrometheusVersion/manifests/prometheus-prometheus.yaml |grep "image: " |awk -F':' '{print $2}'`
28 | prometheus_image_tag=`cat kube-prometheus-$KubePrometheusVersion/manifests/prometheus-prometheus.yaml |grep "image: " |awk -F':' '{print $3}'`
29 |
30 | alertmanager_base_image=`cat kube-prometheus-$KubePrometheusVersion/manifests/alertmanager-alertmanager.yaml |grep "image: " |awk -F':' '{print $2}'`
31 | alertmanager_image_tag=`cat kube-prometheus-$KubePrometheusVersion/manifests/alertmanager-alertmanager.yaml |grep "image: " |awk -F':' '{print $3}'`
32 |
33 | echo $prometheus_base_image:$prometheus_image_tag >> image-lists-temp.txt
34 | echo $alertmanager_base_image:$alertmanager_image_tag >> image-lists-temp.txt
35 |
36 | rm -rf kube-prometheus-$KubePrometheusVersion
37 |
38 | sed "s/- --config-reloader-image=//g" image-lists-temp.txt > 1.txt
39 | sed "s/- --prometheus-config-reloader=//g" 1.txt > 2.txt
40 | sed "s/image: //g" 2.txt > 3.txt
41 | rm -f image-lists-temp.txt 1.txt 2.txt
42 | mv 3.txt images-list.txt
43 |
44 | cat images-list.txt
45 |
46 | for file in $(cat images-list.txt); do docker pull $file; done
47 | echo 'Images pulled.'
48 |
49 | docker save $(cat images-list.txt) -o kube-prometheus-images-v$KubePrometheusVersion.tar
50 | echo 'Images saved.'
51 | bzip2 -z --best kube-prometheus-images-v$KubePrometheusVersion.tar
52 | echo 'Images compressed in bzip2 format.'
53 |
--------------------------------------------------------------------------------
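The seven grep passes plus the sed cleanup collect every container image reference from the downloaded manifests. A compact alternative sketch that yields a comparable (sorted, deduplicated) list in one pipeline, assuming every reference appears as an `image:` value or an `*image=`/`*config-reloader=` flag as above, and that $KubePrometheusVersion is set as in init.sh:

    dir=kube-prometheus-$KubePrometheusVersion/manifests
    grep -rhE 'image: |image=|config-reloader=' "$dir" \
      | sed -E 's/.*(image: |image=|config-reloader=)//' \
      | sort -u > images-list.txt

--------------------------------------------------------------------------------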
/prometheus-playbook/version/properties.json:
--------------------------------------------------------------------------------
1 | [
2 |   {
3 |     "variable": "self",
4 |     "label": "PrometheusOperator installation node",
5 |     "description": "please select a kubernetes worker node for installation",
6 |     "type": "host",
7 |     "default": "",
8 |     "required": true
9 |   },
10 |   {
11 |     "variable": "PrometheusNodePort",
12 |     "label": "NodePort for Prometheus",
13 |     "description": "NodePort for Prometheus service, e.g.: 30900",
14 |     "type": "int",
15 |     "default": "30900",
16 |     "required": true
17 |   },
18 |   {
19 |     "variable": "AlertManagerNodePort",
20 |     "label": "NodePort for AlertManager",
21 |     "description": "NodePort for AlertManager service, e.g.: 30903",
22 |     "type": "int",
23 |     "default": "30903",
24 |     "required": true
25 |   },
26 |   {
27 |     "variable": "GrafanaNodePort",
28 |     "label": "NodePort for Grafana",
29 |     "description": "NodePort for Grafana service, e.g.: 30902",
30 |     "type": "int",
31 |     "default": "30902",
32 |     "required": true
33 |   }
34 | ]
35 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/reset.ansible:
--------------------------------------------------------------------------------
1 | - name: clean prometheus operator
2 |   hosts: all
3 |   user: root
4 |   tasks:
5 |   - name: copy k8s admin.conf for prometheus installation
6 |     copy:
7 |       src: '{{ item.src }}'
8 |       dest: '{{ item.dest }}'
9 |     with_items:
10 |     - { src: '../../kubernetes-playbook/{{ kubernetes_version }}/file/admin.conf', dest: '{{ ansible_env.HOME }}/.kube/config' }
11 |
12 |   - name: setup kubectl certification
13 |     shell: |
14 |       sed -i "s/.*server:.*/    server: https:\/\/{{ kubernetes_endpoint }}/g" $HOME/.kube/config
15 |       chown $(id -u):$(id -g) $HOME/.kube/config
16 |
17 |   - name: stop & rm prometheus service
18 |     shell: ./remove.sh
19 |     args:
20 |       chdir: '{{ cpath }}'
21 |     ignore_errors: true
22 |
23 |   - name: remove kubectl cert
24 |     file:
25 |       path: '{{ item }}'
26 |       state: absent
27 |     with_items:
28 |     - '{{ ansible_env.HOME }}/.kube/config'
29 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/template/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wise2c-devops/breeze/66f8cc295dc4f8eec7a5674ebdd76a0b06236f8c/prometheus-playbook/version/template/README.md
--------------------------------------------------------------------------------
/prometheus-playbook/version/template/alertmanager-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     alertmanager: main
6 |     app.kubernetes.io/component: alert-router
7 |     app.kubernetes.io/name: alertmanager
8 |     app.kubernetes.io/part-of: kube-prometheus
9 |     app.kubernetes.io/version: 0.21.0
10 |   name: alertmanager-main
11 |   namespace: monitoring
12 | spec:
13 |   type: NodePort
14 |   ports:
15 |   - name: web
16 |     nodePort: {{ alertmanager_nodeport }}
17 |     port: 9093
18 |     targetPort: web
19 |   selector:
20 |     alertmanager: main
21 |     # app: alertmanager
22 |     app.kubernetes.io/component: alert-router
23 |     app.kubernetes.io/name: alertmanager
24 |     app.kubernetes.io/part-of: kube-prometheus
25 |   sessionAffinity: ClientIP
26 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/template/grafana-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app.kubernetes.io/component: grafana
6 |     app.kubernetes.io/name: grafana
7 |     app.kubernetes.io/part-of: kube-prometheus
8 |     app.kubernetes.io/version: 7.5.4
9 |   name: grafana
10 |   namespace: monitoring
11 | spec:
12 |   type: NodePort
13 |   ports:
14 |   - name: http
15 |     nodePort: {{ grafana_nodeport }}
16 |     port: 3000
17 |     targetPort: http
18 |   selector:
19 |     app.kubernetes.io/component: grafana
20 |     app.kubernetes.io/name: grafana
21 |     app.kubernetes.io/part-of: kube-prometheus
22 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/template/prometheus-service.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app.kubernetes.io/component: prometheus
6 |     app.kubernetes.io/name: prometheus
7 |     app.kubernetes.io/part-of: kube-prometheus
8 |     app.kubernetes.io/version: 2.26.0
9 |     prometheus: k8s
10 |   name: prometheus-k8s
11 |   namespace: monitoring
12 | spec:
13 |   type: NodePort
14 |   ports:
15 |   - name: web
16 |     nodePort: {{ prometheus_nodeport }}
17 |     port: 9090
18 |     targetPort: web
19 |   selector:
20 |     # app: prometheus
21 |     app.kubernetes.io/component: prometheus
22 |     app.kubernetes.io/name: prometheus
23 |     app.kubernetes.io/part-of: kube-prometheus
24 |     prometheus: k8s
25 |   sessionAffinity: ClientIP
26 |
--------------------------------------------------------------------------------
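The three templates differ from the stock kube-prometheus services only in type: NodePort, the rendered nodePort values, and the ClientIP session affinity for Prometheus and Alertmanager. Once deployed, the chosen ports can be read back directly:

    # The NODE-PORT column should match the values from properties.json
    kubectl -n monitoring get svc prometheus-k8s alertmanager-main grafana

--------------------------------------------------------------------------------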
/prometheus-playbook/version/yat/all.yml.gotmpl:
--------------------------------------------------------------------------------
1 | registry_endpoint: {{ .harbor.Inherent.endpoint }}
2 | registry_user: {{ .harbor.Inherent.user }}
3 | registry_password: {{ .harbor.Inherent.password }}
4 | registry_project: library
5 | etcd_endpoint: {{ .etcd.Inherent.endpoint }}
6 | prometheus_nodeport: {{ .prometheus.Property.PrometheusNodePort }}
7 | alertmanager_nodeport: {{ .prometheus.Property.AlertManagerNodePort }}
8 | grafana_nodeport: {{ .prometheus.Property.GrafanaNodePort }}
9 | kubernetes_endpoint: {{ .kubernetes.Inherent.endpoint }}
10 | kubernetes_version: {{ .kubernetes.Inherent.version }}
11 | etcd_version: {{ .etcd.Inherent.etcd_version }}
12 |
--------------------------------------------------------------------------------
/prometheus-playbook/version/yat/hosts.gotmpl:
--------------------------------------------------------------------------------
1 | [prometheus]
2 | {{ range $v := .prometheus.Hosts.self -}}
3 | {{ $v.IP }}
4 | {{ end }}
5 |
--------------------------------------------------------------------------------
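yat renders hosts.gotmpl into the Ansible inventory and all.yml.gotmpl into the group variables before the play runs. Only as a rough illustration (the real invocation is driven by the Breeze deploy engine, not typed by hand), the rendered files are consumed along these lines:

    # hosts rendered from yat/hosts.gotmpl, variables from yat/all.yml.gotmpl
    ansible-playbook -i hosts install.ansible

--------------------------------------------------------------------------------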