├── .github
├── ISSUE_TEMPLATE
│ ├── bug-report.yaml
│ └── enhancement.yaml
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── mirror.yml
│ └── stale.yml
├── .gitignore
├── README.md
├── ansible.cfg
├── docs
├── deprecated
│ ├── kuboard.md
│ └── practice
│ │ ├── dockerize_system_service.md
│ │ ├── es_cluster.md
│ │ ├── go_web_app
│ │ ├── Dockerfile
│ │ ├── Dockerfile-more
│ │ ├── hellogo.go
│ │ ├── hellogo.yaml
│ │ └── readme.md
│ │ ├── java_war_app.md
│ │ └── mariadb_cluster.md
├── guide
│ ├── argocd.md
│ ├── chrony.md
│ ├── dashboard.1.6.3.md
│ ├── dashboard.2.x.md
│ ├── dashboard.md
│ ├── efk.md
│ ├── gitlab
│ │ ├── app.yaml.md
│ │ ├── config.sh.md
│ │ ├── gitlab-ci.yml.md
│ │ ├── gitlab-install.md
│ │ ├── gitlab-runner.md
│ │ ├── pics
│ │ │ ├── active-runner.jpg
│ │ │ ├── cicd-pipeline.jpg
│ │ │ ├── cicd-setting.jpg
│ │ │ └── runner.jpg
│ │ └── readme.md
│ ├── harbor.md
│ ├── helm.md
│ ├── hpa.md
│ ├── index.md
│ ├── ingress-tls.md
│ ├── ingress.md
│ ├── ipvs.md
│ ├── istio.md
│ ├── jenkins.md
│ ├── kernel_upgrade.md
│ ├── kubeapps.md
│ ├── kubedns.md
│ ├── kubesphere.md
│ ├── log-pilot.md
│ ├── metallb.md
│ ├── metrics-server.md
│ ├── networkpolicy.md
│ ├── nfs-server.md
│ ├── prometheus.md
│ └── rollingupdateWithZeroDowntime.md
├── mixes
│ ├── DoneList.md
│ ├── HowToContribute.md
│ ├── LICENSE
│ ├── conformance.md
│ └── donate.md
├── op
│ ├── ch_apiserver_cert.md
│ ├── cluster_restore.md
│ ├── force_ch_certs.md
│ ├── kcfg-adm.md
│ ├── loadballance_ingress_nodeport.md
│ ├── op-etcd.md
│ ├── op-index.md
│ ├── op-master.md
│ ├── op-node.md
│ └── upgrade.md
├── release-notes
│ ├── kubeasz-3.5.0.md
│ ├── kubeasz-3.5.1.md
│ ├── kubeasz-3.5.2.md
│ ├── kubeasz-3.5.3.md
│ ├── kubeasz-3.6.0.md
│ ├── kubeasz-3.6.1.md
│ ├── kubeasz-3.6.2.md
│ ├── kubeasz-3.6.3.md
│ ├── kubeasz-3.6.4.md
│ ├── kubeasz-3.6.5.md
│ └── kubeasz-3.6.6.md
└── setup
│ ├── 00-planning_and_overall_intro.md
│ ├── 01-CA_and_prerequisite.md
│ ├── 02-install_etcd.md
│ ├── 03-container_runtime.md
│ ├── 04-install_kube_master.md
│ ├── 05-install_kube_node.md
│ ├── 06-install_network_plugin.md
│ ├── 07-install_cluster_addon.md
│ ├── 08-cluster-storage.md
│ ├── config_guide.md
│ ├── ex-lb.md
│ ├── ezctl.md
│ ├── kubeasz_on_public_cloud.md
│ ├── multi_os.md
│ ├── multi_platform.md
│ ├── network-plugin
│ ├── calico-bgp-rr.md
│ ├── calico.md
│ ├── cilium-example.md
│ ├── cilium.md
│ ├── flannel.md
│ ├── kube-ovn.md
│ ├── kube-router.md
│ └── network-check.md
│ ├── offline_install.md
│ └── quickStart.md
├── example
├── config.yml
├── hosts.allinone
└── hosts.multi-node
├── ezctl
├── ezdown
├── manifests
├── efk
│ ├── es-dynamic-pv
│ │ └── es-statefulset.yaml
│ ├── es-index-rotator
│ │ └── rotator.yaml
│ ├── es-service.yaml
│ ├── es-static-pv
│ │ ├── es-pv0.yaml
│ │ ├── es-pv1.yaml
│ │ ├── es-pv2.yaml
│ │ └── es-statefulset.yaml
│ ├── es-without-pv
│ │ └── es-statefulset.yaml
│ ├── fluentd-es-configmap.yaml
│ ├── fluentd-es-ds.yaml
│ ├── kibana-deployment.yaml
│ ├── kibana-service.yaml
│ └── log-pilot
│ │ └── log-pilot-filebeat.yaml
├── es-cluster
│ ├── elasticsearch
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ ├── OWNERS
│ │ ├── README.md
│ │ ├── templates
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── client-deployment.yaml
│ │ │ ├── client-pdb.yaml
│ │ │ ├── client-svc.yaml
│ │ │ ├── configmap.yaml
│ │ │ ├── data-pdb.yaml
│ │ │ ├── data-statefulset.yaml
│ │ │ ├── master-pdb.yaml
│ │ │ ├── master-statefulset.yaml
│ │ │ └── master-svc.yaml
│ │ └── values.yaml
│ └── es-values.yaml
├── ingress
│ ├── nginx-ingress
│ │ ├── nginx-ingress-svc.yaml
│ │ ├── nginx-ingress.yaml
│ │ ├── tcp-services-configmap.yaml
│ │ └── udp-services-configmap.yaml
│ ├── test-hello.ing.yaml
│ ├── traefik
│ │ ├── tls
│ │ │ ├── hello-tls.ing.yaml
│ │ │ ├── k8s-dashboard.ing.yaml
│ │ │ └── traefik-controller.yaml
│ │ ├── traefik-ingress.yaml
│ │ └── traefik-ui.ing.yaml
│ ├── whoami.ing.yaml
│ └── whoami.yaml
├── jenkins
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── OWNERS
│ ├── README.md
│ ├── templates
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── config.yaml
│ │ ├── home-pvc.yaml
│ │ ├── jenkins-agent-svc.yaml
│ │ ├── jenkins-master-deployment.yaml
│ │ ├── jenkins-master-ingress.yaml
│ │ ├── jenkins-master-networkpolicy.yaml
│ │ ├── jenkins-master-svc.yaml
│ │ ├── jenkins-test.yaml
│ │ ├── jobs.yaml
│ │ ├── rbac.yaml
│ │ ├── secret.yaml
│ │ ├── service-account.yaml
│ │ └── test-config.yaml
│ └── values.yaml
├── mariadb-cluster
│ ├── mariadb
│ │ ├── .helmignore
│ │ ├── Chart.yaml
│ │ ├── OWNERS
│ │ ├── README.md
│ │ ├── files
│ │ │ └── docker-entrypoint-initdb.d
│ │ │ │ └── README.md
│ │ ├── templates
│ │ │ ├── NOTES.txt
│ │ │ ├── _helpers.tpl
│ │ │ ├── initialization-configmap.yaml
│ │ │ ├── master-configmap.yaml
│ │ │ ├── master-statefulset.yaml
│ │ │ ├── master-svc.yaml
│ │ │ ├── secrets.yaml
│ │ │ ├── slave-configmap.yaml
│ │ │ ├── slave-statefulset.yaml
│ │ │ ├── slave-svc.yaml
│ │ │ ├── test-runner.yaml
│ │ │ └── tests.yaml
│ │ ├── values-production.yaml
│ │ └── values.yaml
│ └── my-values.yaml
├── mysql-cluster
│ ├── mysql-configmap.yaml
│ ├── mysql-services.yaml
│ ├── mysql-statefulset.yaml
│ └── mysql-test-client.yaml
├── redis-cluster
│ ├── redis-ha
│ │ ├── Chart.yaml
│ │ ├── OWNERS
│ │ ├── README.md
│ │ ├── ci
│ │ │ └── haproxy-enabled-values.yaml
│ │ ├── templates
│ │ │ ├── NOTES.txt
│ │ │ ├── _configs.tpl
│ │ │ ├── _helpers.tpl
│ │ │ ├── redis-auth-secret.yaml
│ │ │ ├── redis-ha-announce-service.yaml
│ │ │ ├── redis-ha-configmap.yaml
│ │ │ ├── redis-ha-exporter-script-configmap.yaml
│ │ │ ├── redis-ha-pdb.yaml
│ │ │ ├── redis-ha-role.yaml
│ │ │ ├── redis-ha-rolebinding.yaml
│ │ │ ├── redis-ha-service.yaml
│ │ │ ├── redis-ha-serviceaccount.yaml
│ │ │ ├── redis-ha-servicemonitor.yaml
│ │ │ ├── redis-ha-statefulset.yaml
│ │ │ ├── redis-haproxy-deployment.yaml
│ │ │ ├── redis-haproxy-service.yaml
│ │ │ ├── redis-haproxy-serviceaccount.yaml
│ │ │ ├── redis-haproxy-servicemonitor.yaml
│ │ │ └── tests
│ │ │ │ ├── test-redis-ha-configmap.yaml
│ │ │ │ └── test-redis-ha-pod.yaml
│ │ └── values.yaml
│ ├── start.sh
│ └── values.yaml
└── storage
│ ├── local-storage
│ ├── example-sts.yml
│ ├── local-pv1.yml
│ ├── local-pv2.yml
│ └── local-storage-class.yml
│ └── test.yaml
├── pics
├── alipay.gif
├── ha-1x.gif
├── ha-3x.svg
├── kubeasz.svg
└── wxpay.gif
├── playbooks
├── 01.prepare.yml
├── 02.etcd.yml
├── 03.runtime.yml
├── 04.kube-master.yml
├── 05.kube-node.yml
├── 06.network.yml
├── 07.cluster-addon.yml
├── 10.ex-lb.yml
├── 11.harbor.yml
├── 21.addetcd.yml
├── 22.addnode.yml
├── 23.addmaster.yml
├── 31.deletcd.yml
├── 32.delnode.yml
├── 33.delmaster.yml
├── 90.setup.yml
├── 91.start.yml
├── 92.stop.yml
├── 93.upgrade.yml
├── 94.backup.yml
├── 95.restore.yml
├── 96.update-certs.yml
└── 99.clean.yml
├── roles
├── calico
│ ├── tasks
│ │ ├── calico-rr.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── bgp-default.yaml.j2
│ │ ├── bgp-rr.yaml.j2
│ │ ├── calico-csr.json.j2
│ │ ├── calico-v3.19.yaml.j2
│ │ ├── calico-v3.23.yaml.j2
│ │ ├── calico-v3.24.yaml.j2
│ │ ├── calico-v3.26.yaml.j2
│ │ ├── calico-v3.28.yaml.j2
│ │ └── calicoctl.cfg.j2
│ └── vars
│ │ └── main.yml
├── chrony
│ ├── chrony.yml
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── chronyd.service.j2
│ │ ├── client.conf.j2
│ │ └── server.conf.j2
├── cilium
│ ├── cilium.yml
│ ├── files
│ │ ├── cilium-1.16.3.tgz
│ │ └── star_war_example
│ │ │ ├── http-sw-app.yaml
│ │ │ ├── sw_l3_l4_l7_policy.yaml
│ │ │ └── sw_l3_l4_policy.yaml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── values.yaml.j2
├── clean
│ ├── clean_node.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── clean_chrony.yml
│ │ ├── clean_etcd.yml
│ │ ├── clean_lb.yml
│ │ ├── clean_master.yml
│ │ ├── clean_node.yml
│ │ └── main.yml
├── cluster-addon
│ ├── files
│ │ ├── elasticsearch-0.9.1.tgz
│ │ ├── kube-prometheus-stack-45.23.0.tgz
│ │ ├── kubeapps-12.4.3.tgz
│ │ ├── kubeblocks-0.9.3.tgz
│ │ ├── kubeblocks_crds.yaml
│ │ ├── kubernetes-dashboard-7.12.0.tgz
│ │ ├── minio-0.9.0.tgz
│ │ ├── mongodb-0.9.1.tgz
│ │ ├── mysql-0.9.1.tgz
│ │ ├── postgresql-0.9.0.tgz
│ │ └── redis-0.9.1.tgz
│ ├── tasks
│ │ ├── cilium_connectivity_check.yml
│ │ ├── coredns.yml
│ │ ├── dashboard.yml
│ │ ├── kubeapps.yml
│ │ ├── kubeblocks.yml
│ │ ├── local-storage.yml
│ │ ├── main.yml
│ │ ├── metrics-server.yml
│ │ ├── network_check.yml
│ │ ├── nfs-provisioner.yml
│ │ ├── nodelocaldns.yml
│ │ └── prometheus.yml
│ ├── templates
│ │ ├── cilium-check
│ │ │ ├── check-part1.yaml.j2
│ │ │ ├── connectivity-check.yaml.j2
│ │ │ └── namespace.yaml.j2
│ │ ├── dashboard
│ │ │ ├── admin-user-sa-rbac.yaml.j2
│ │ │ ├── dashboard-values.yaml.j2
│ │ │ └── read-user-sa-rbac.yaml.j2
│ │ ├── dns
│ │ │ ├── coredns.yaml.j2
│ │ │ ├── kubedns.yaml.j2
│ │ │ ├── nodelocaldns-iptables.yaml.j2
│ │ │ └── nodelocaldns-ipvs.yaml.j2
│ │ ├── kubeapps
│ │ │ ├── kubeapps-admin-token.yaml.j2
│ │ │ ├── single-namespace-edit-token.yaml.j2
│ │ │ ├── single-namespace-view-token.yaml.j2
│ │ │ └── values.yaml.j2
│ │ ├── kubeblocks
│ │ │ ├── es-cluster.yaml.j2
│ │ │ ├── kb-values.yaml.j2
│ │ │ ├── minio-cluster.yaml.j2
│ │ │ ├── mongodb-cluster.yaml.j2
│ │ │ ├── mysql-cluster.yaml.j2
│ │ │ ├── pg-cluster.yaml.j2
│ │ │ └── redis-cluster.yaml.j2
│ │ ├── local-storage
│ │ │ ├── local-path-storage.yaml.j2
│ │ │ └── test-pod.yaml.j2
│ │ ├── metrics-server
│ │ │ └── components.yaml.j2
│ │ ├── network-check
│ │ │ ├── namespace.yaml.j2
│ │ │ └── network-check.yaml.j2
│ │ ├── nfs-provisioner
│ │ │ ├── nfs-provisioner.yaml.j2
│ │ │ └── test-pod.yaml.j2
│ │ └── prometheus
│ │ │ ├── dingtalk-webhook.yaml
│ │ │ ├── etcd-client-csr.json.j2
│ │ │ ├── example-config-alertsmanager.yaml
│ │ │ └── values.yaml.j2
│ └── vars
│ │ └── main.yml
├── cluster-restore
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── containerd
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── config.toml.j2
│ │ ├── containerd.service.j2
│ │ └── crictl.yaml.j2
├── deploy
│ ├── deploy.yml
│ ├── tasks
│ │ ├── add-custom-kubectl-kubeconfig.yml
│ │ ├── create-kube-controller-manager-kubeconfig.yml
│ │ ├── create-kube-proxy-kubeconfig.yml
│ │ ├── create-kube-scheduler-kubeconfig.yml
│ │ ├── create-kubectl-kubeconfig.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── admin-csr.json.j2
│ │ ├── ca-config.json.j2
│ │ ├── ca-csr.json.j2
│ │ ├── crb.yaml.j2
│ │ ├── kube-controller-manager-csr.json.j2
│ │ ├── kube-proxy-csr.json.j2
│ │ ├── kube-scheduler-csr.json.j2
│ │ └── user-csr.json.j2
│ └── vars
│ │ └── main.yml
├── docker
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── daemon.json.j2
│ │ └── docker.service.j2
│ └── vars
│ │ └── main.yml
├── etcd
│ ├── clean-etcd.yml
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── etcd-csr.json.j2
│ │ └── etcd.service.j2
├── ex-lb
│ ├── clean-ex-lb.yml
│ ├── defaults
│ │ └── main.yml
│ ├── ex-lb.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── keepalived-backup.conf.j2
│ │ ├── keepalived-master.conf.j2
│ │ ├── keepalived.service.j2
│ │ ├── l4lb.conf.j2
│ │ └── l4lb.service.j2
├── flannel
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── kube-flannel.yaml.j2
├── harbor
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── harbor-csr.json.j2
│ │ ├── harbor-v1.10.yml.j2
│ │ ├── harbor-v2.10.yml.j2
│ │ ├── harbor-v2.11.yml.j2
│ │ └── harbor-v2.12.yml.j2
│ └── vars
│ │ └── main.yml
├── kube-lb
│ ├── clean-kube-lb.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── kube-lb.conf.j2
│ │ └── kube-lb.service.j2
├── kube-master
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── aggregator-proxy-csr.json.j2
│ │ ├── kube-apiserver.service.j2
│ │ ├── kube-controller-manager.service.j2
│ │ ├── kube-scheduler.service.j2
│ │ └── kubernetes-csr.json.j2
│ └── vars
│ │ └── main.yml
├── kube-node
│ ├── tasks
│ │ ├── create-kubelet-kubeconfig.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── cni-default.conf.j2
│ │ ├── kube-proxy-config.yaml.j2
│ │ ├── kube-proxy.service.j2
│ │ ├── kubelet-config.yaml.j2
│ │ ├── kubelet-csr.json.j2
│ │ └── kubelet.service.j2
│ └── vars
│ │ └── main.yml
├── kube-ovn
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── coredns.yaml.j2
│ │ ├── install.sh.j2
│ │ ├── nodelocaldns-iptables.yaml.j2
│ │ └── nodelocaldns-ipvs.yaml.j2
│ └── vars
│ │ └── main.yml
├── kube-router
│ ├── kube-router.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── kuberouter.yaml.j2
├── os-harden
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── apt.yml
│ │ ├── auditd.yml
│ │ ├── hardening.yml
│ │ ├── limits.yml
│ │ ├── login_defs.yml
│ │ ├── main.yml
│ │ ├── minimize_access.yml
│ │ ├── modprobe.yml
│ │ ├── pam.yml
│ │ ├── profile.yml
│ │ ├── rhosts.yml
│ │ ├── securetty.yml
│ │ ├── selinux.yml
│ │ ├── suid_sgid.yml
│ │ ├── sysctl.yml
│ │ ├── user_accounts.yml
│ │ └── yum.yml
│ ├── templates
│ │ ├── etc
│ │ │ ├── audit
│ │ │ │ └── auditd.conf.j2
│ │ │ ├── default
│ │ │ │ └── ufw.j2
│ │ │ ├── initramfs-tools
│ │ │ │ └── modules.j2
│ │ │ ├── libuser.conf.j2
│ │ │ ├── login.defs.j2
│ │ │ ├── modprobe.d
│ │ │ │ └── modprobe.j2
│ │ │ ├── pam.d
│ │ │ │ └── rhel_system_auth.j2
│ │ │ ├── profile.d
│ │ │ │ └── profile.conf.j2
│ │ │ ├── securetty.j2
│ │ │ └── sysconfig
│ │ │ │ └── rhel_sysconfig_init.j2
│ │ └── usr
│ │ │ └── share
│ │ │ └── pam-configs
│ │ │ ├── pam_passwdqd.j2
│ │ │ └── pam_tally2.j2
│ └── vars
│ │ ├── Amazon.yml
│ │ ├── Archlinux.yml
│ │ ├── Debian.yml
│ │ ├── Fedora.yml
│ │ ├── Oracle Linux.yml
│ │ ├── RedHat-6.yml
│ │ ├── RedHat.yml
│ │ ├── Suse.yml
│ │ └── main.yml
└── prepare
│ ├── files
│ └── sctp.conf
│ ├── tasks
│ ├── common.yml
│ ├── debian.yml
│ ├── main.yml
│ ├── offline.yml
│ ├── redhat.yml
│ └── suse.yml
│ ├── templates
│ ├── 10-k8s-modules.conf.j2
│ ├── 30-k8s-ulimits.conf.j2
│ ├── 95-k8s-journald.conf.j2
│ └── 95-k8s-sysctl.conf.j2
│ └── vars
│ └── main.yml
└── tools
├── imgutils
├── kubectl-node_shell
├── kubetail
└── yc-ssh-key-copy.sh
/.github/ISSUE_TEMPLATE/enhancement.yaml:
--------------------------------------------------------------------------------
1 | name: Enhancement Tracking Issue
2 | description: Provide supporting details for a feature in development
3 | labels: kind/feature
4 | body:
5 | - type: textarea
6 | id: feature
7 | attributes:
8 | label: What would you like to be added?
9 | description: |
10 | Feature requests are unlikely to make progress as issues.
11 | A proposal that works through the design along with the implications of the change can be opened as a KEP.
12 | validations:
13 | required: true
14 |
15 | - type: textarea
16 | id: rationale
17 | attributes:
18 | label: Why is this needed?
19 | validations:
20 | required: true
21 |
--------------------------------------------------------------------------------
/.github/workflows/mirror.yml:
--------------------------------------------------------------------------------
1 | name: Mirroring
2 |
3 | on:
4 | push:
5 | #branches:
6 | # - 'master'
7 | tags:
8 | - '*.*.*'
9 |
10 | jobs:
11 | to_gitee:
12 | runs-on: ubuntu-latest
13 | steps: # <-- must use actions/checkout before mirroring!
14 | - uses: actions/checkout@v2
15 | with:
16 | fetch-depth: 0
17 | - uses: pixta-dev/repository-mirroring-action@v1
18 | with:
19 | target_repo_url:
20 | git@gitee.com:easzlab/kubeasz.git
21 | ssh_private_key:
22 | ${{ secrets.SYNCGITEE }} # SSH private key (secret)
23 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Close inactive issues
2 | on:
3 | schedule:
4 | - cron: "1 21 * * *"
5 |
6 | jobs:
7 | close-issues:
8 | runs-on: ubuntu-latest
9 | permissions:
10 | issues: write
11 | pull-requests: write
12 | steps:
13 | - uses: actions/stale@v5
14 | with:
15 | operations-per-run: 50
16 | days-before-issue-stale: 30
17 | days-before-issue-close: 7
18 | stale-issue-label: "stale"
19 | stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
20 | close-issue-message: "This issue was closed because it has been inactive for 7 days since being marked as stale."
21 | days-before-pr-stale: -1
22 | days-before-pr-close: -1
23 | repo-token: ${{ secrets.GITHUB_TOKEN }}
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # download directory
2 | down/*
3 |
4 | # binaries directory
5 | bin/*
6 |
7 | # k8s storage manifests
8 | manifests/storage/*
9 | !manifests/storage/test.yaml
10 | !manifests/storage/local-storage/
11 |
12 | # role based variable settings, exclude roles/os-harden/vars/
13 | #/roles/*/vars/*
14 | #!/roles/os-harden/vars/
15 |
16 | # cluster instances
17 | clusters/
18 |
--------------------------------------------------------------------------------
/docs/deprecated/practice/go_web_app/Dockerfile:
--------------------------------------------------------------------------------
1 | # a demo for containerizing golang web apps
2 | #
3 | # @author:
4 | # @repo:
5 | # @ref:
6 |
7 | # stage 1: build src code to binary
8 | FROM golang:1.13-alpine3.10 as builder
9 |
10 | COPY *.go /app/
11 |
12 | RUN cd /app && go build -o hellogo .
13 |
14 | # stage 2: use alpine as base image
15 | FROM alpine:3.10
16 |
17 | RUN apk update && \
18 | apk --no-cache add tzdata ca-certificates && \
19 | cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
20 | apk del tzdata && \
21 | rm -rf /var/cache/apk/*
22 |
23 | COPY --from=builder /app/hellogo /hellogo
24 |
25 | CMD ["/hellogo"]
26 |
--------------------------------------------------------------------------------
/docs/deprecated/practice/go_web_app/Dockerfile-more:
--------------------------------------------------------------------------------
1 | # build stage
2 | FROM golang:1.13 as builder
3 |
4 | # ENV GOPROXY=https://goproxy.cn
5 | # GOPROXY lets the build download packages blocked outside the firewall through a proxy
6 | # GOPRIVATE keeps packages hosted on the local gitlab from going through that proxy
7 | ENV GOPROXY=https://goproxy.io
8 | ENV GOPRIVATE=gitlab.yourdomain.com/*
9 |
10 | WORKDIR /root
11 |
12 | COPY ./ .
13 |
14 | # projects on the local gitlab are private, so the build needs an ssh key to fetch local gitlab packages
15 | # upload the public half of the key to the SSH Keys section of your gitlab profile beforehand
16 | # pass the private key at docker build time with --build-arg 'SSH_PKEY=${KEY_TXT}'
17 | # in a CI/CD pipeline, ${KEY_TXT} can be a jenkins secret-text parameter or a gitlab-ci secret variable
18 | ARG SSH_PKEY
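# example invocation (a sketch; the image tag is illustrative): docker build --build-arg "SSH_PKEY=${KEY_TXT}" -t yourapp:v1 .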
19 |
20 | # the git config rewrite makes go fetch projects over ssh as git@gitlab.yourdomain.com:xxx/yyy.git
21 | #
22 |
23 | RUN git config --global url."git@gitlab.yourdomain.com:".insteadof "https://gitlab.yourdomain.com/" && \
24 | mkdir -p /root/.ssh && \
25 | echo "-----BEGIN RSA PRIVATE KEY-----" > /root/.ssh/id_rsa && \
26 | echo "${SSH_PKEY}" >> /root/.ssh/id_rsa && \
27 | echo "-----END RSA PRIVATE KEY-----" >> /root/.ssh/id_rsa && \
28 | sed -i "2s/ /\\n/g" /root/.ssh/id_rsa && \
29 | echo "StrictHostKeyChecking no" > /root/.ssh/config && \
30 | chmod 600 /root/.ssh/id_rsa
31 |
32 | RUN go mod tidy && \
33 | go mod download
34 |
35 | RUN CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o main cmd/main.go
36 |
37 | # final stage
38 | FROM alpine:3.10
39 |
40 | WORKDIR /home/admin/bin
41 |
42 | COPY --from=builder /root/main .
43 |
44 | CMD ["./main"]
45 |
--------------------------------------------------------------------------------
/docs/deprecated/practice/go_web_app/hellogo.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "math/rand"
7 | "net/http"
8 | "time"
9 | )
10 |
11 | var appVersion = "1.2" //Default/fallback version
12 | var instanceNum int
13 |
14 | func getFrontpage(w http.ResponseWriter, r *http.Request) {
15 | t := time.Now()
16 | fmt.Fprintf(w, "Hello, Go! I'm instance %d running version %s at %s\n", instanceNum, appVersion, t.Format("2006-01-02 15:04:05"))
17 | }
18 |
19 | func health(w http.ResponseWriter, r *http.Request) {
20 | w.WriteHeader(http.StatusOK)
21 | }
22 |
23 | func getVersion(w http.ResponseWriter, r *http.Request) {
24 | fmt.Fprintf(w, "%s\n", appVersion)
25 | }
26 |
27 | func main() {
28 | rand.Seed(time.Now().UTC().UnixNano())
29 | instanceNum = rand.Intn(1000)
30 | http.HandleFunc("/", getFrontpage)
31 | http.HandleFunc("/health", health)
32 | http.HandleFunc("/version", getVersion)
33 | log.Fatal(http.ListenAndServe(":3000", nil))
34 | }
35 |
--------------------------------------------------------------------------------
/docs/deprecated/practice/go_web_app/hellogo.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hellogo-deploy
6 | spec:
7 | replicas: 3
8 | minReadySeconds: 5 # Wait 5 seconds after each new pod comes up before marked as "ready"
9 | strategy:
10 | type: RollingUpdate # describe how we do rolling updates
11 | rollingUpdate:
12 | maxUnavailable: 1 # When updating take one pod down at a time
13 | maxSurge: 1
14 | selector:
15 | matchLabels:
16 | name: hellogo-app
17 | template:
18 | metadata:
19 | labels:
20 | name: hellogo-app
21 | spec:
22 | containers:
23 | - name: hellogo
24 | image: hellogo:v1.0
25 | imagePullPolicy: IfNotPresent
26 | resources:
27 | requests:
28 | memory: "32Mi"
29 | cpu: "50m"
30 | limits:
31 | memory: "64Mi"
32 | cpu: "100m"
33 | ports:
34 | - containerPort: 3000
35 |
36 | ---
37 | apiVersion: v1
38 | kind: Service
39 | metadata:
40 | name: hellogo-svc
41 | spec:
42 | type: NodePort
43 | ports:
44 | - name: http
45 | port: 80
46 | targetPort: 3000
47 | nodePort: 30000
48 | selector:
49 | name: hellogo-app
50 |
--------------------------------------------------------------------------------
/docs/guide/gitlab/pics/active-runner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/docs/guide/gitlab/pics/active-runner.jpg
--------------------------------------------------------------------------------
/docs/guide/gitlab/pics/cicd-pipeline.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/docs/guide/gitlab/pics/cicd-pipeline.jpg
--------------------------------------------------------------------------------
/docs/guide/gitlab/pics/cicd-setting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/docs/guide/gitlab/pics/cicd-setting.jpg
--------------------------------------------------------------------------------
/docs/guide/gitlab/pics/runner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/docs/guide/gitlab/pics/runner.jpg
--------------------------------------------------------------------------------
/docs/guide/index.md:
--------------------------------------------------------------------------------
1 | ## User Guide
2 |
3 | ### Add-on installation
4 |
5 | - Install [kubedns](kubedns.md)
6 | - Install [dashboard](dashboard.md)
7 | - Install [metrics-server](metrics-server.md)
8 | - Install [prometheus](prometheus.md)
9 | - Install [kubeapps](kubeapps.md)
10 | - Install [ingress](ingress.md)
11 | - Install [helm](helm.md)
12 | - Install [efk](efk.md)
13 | - Install [harbor](harbor.md)
14 | - Install [metallb](metallb.md)
15 |
16 | ### Basic feature demos
17 |
18 | - Autoscaling: [Horizontal Pod Autoscaling](hpa.md)
19 | - Network security: [Network Policy](networkpolicy.md)
20 | - Rolling updates: [rollingupdate](rollingupdateWithZeroDowntime.md)
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/guide/ipvs.md:
--------------------------------------------------------------------------------
1 | # IPVS service load balancing
2 |
3 | The kube-proxy component watches the API server for changes to services and endpoints and provides dynamic load balancing for services inside the k8s cluster. Before v1.10 this was mostly done with iptables, which is the stable, recommended mode, but with many services it generates too many iptables rules and shows clear performance problems at scale. The ipvs high-performance mode went GA in v1.11; it applies updates incrementally and keeps existing connections alive while a service is updated.
4 |
5 | - NOTE: ipvs mode has known problems with k8s v1.11.0 on CentOS 7 (see kubernetes/kubernetes#65461); k8s v1.10.2 on CentOS 7 tested fine.
6 |
7 | ## Enabling ipvs
8 |
9 | Enabling ipvs is recommended for k8s 1.13 and later; simply add `--proxy-mode=ipvs` to the kube-proxy startup arguments (or its config file):
10 |
11 | ``` bash
12 | [Unit]
13 | Description=Kubernetes Kube-Proxy Server
14 | After=network.target
15 |
16 | [Service]
17 | WorkingDirectory=/var/lib/kube-proxy
18 | ExecStart={{ bin_dir }}/kube-proxy \
19 | --bind-address={{ NODE_IP }} \
20 | --hostname-override={{ NODE_IP }} \
21 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
22 | --logtostderr=true \
23 | --proxy-mode=ipvs
24 | Restart=on-failure
25 | RestartSec=5
26 | LimitNOFILE=65536
27 |
28 | [Install]
29 | WantedBy=multi-user.target
30 | ```
31 |
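To confirm the mode took effect after restarting kube-proxy, a quick check along these lines can help (a sketch; it assumes ipvsadm is installed and that kube-proxy's metrics endpoint listens on its default port 10249):

``` bash
# list the virtual servers ipvs has created for cluster services
ipvsadm -Ln

# ask kube-proxy which proxy mode it is actually running
curl 127.0.0.1:10249/proxyMode
```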
--------------------------------------------------------------------------------
/docs/guide/log-pilot.md:
--------------------------------------------------------------------------------
1 | # Log-Pilot + Elasticsearch + Kibana logging solution
2 |
3 | This solution is an upgraded take on the community `EFK` stack. It supports two collection modes, covering both container stdout logs and log files inside containers. Having used it once, it is very simple and convenient on top of existing `EFK` experience and well worth recommending. More about `log-pilot`:
4 |
5 | - GitHub project: https://github.com/AliyunContainerService/log-pilot
6 | - Alibaba Cloud introduction: https://help.aliyun.com/document_detail/86552.html
7 | - Another introduction: https://yq.aliyun.com/articles/674327
8 |
9 | ## Installation steps
10 |
11 | - 1. Install the ES cluster, as in the [EFK](efk.md) doc
12 |
13 | - 2. Install Kibana, as in the [EFK](efk.md) doc
14 |
15 | - 3. Install Log-Pilot
16 |
17 | ``` bash
18 | kubectl apply -f /etc/kubeasz/manifests/efk/log-pilot/log-pilot-filebeat.yaml
19 | ```
20 |
21 | - 4. Create a sample application and collect its logs
22 |
23 | ``` bash
24 | $ cat > tomcat.yaml << EOF
25 | apiVersion: v1
26 | kind: Pod
27 | metadata:
28 | name: tomcat
29 | spec:
30 | containers:
31 | - name: tomcat
32 | image: "tomcat:7.0"
33 | env:
34 | # 1. "stdout" is a reserved keyword meaning: collect the standard output log
35 | # 2. ship the stdout log to the "catalina" index in ES
36 | - name: aliyun_logs_catalina
37 | value: "stdout"
38 | # 1. collect file logs inside the container; wildcards are supported
39 | # 2. ship this log to the "access" index in ES
40 | - name: aliyun_logs_access
41 | value: "/usr/local/tomcat/logs/catalina.*.log"
42 | volumeMounts:
43 | - name: tomcat-log
44 | mountPath: /usr/local/tomcat/logs
45 | volumes:
46 | # in-container file log paths must be backed by an emptyDir volume
47 | - name: tomcat-log
48 | emptyDir: {}
49 | EOF
50 |
51 | $ kubectl apply -f tomcat.yaml
52 | ```
53 |
54 | - 5. Create index patterns in Kibana to verify logs are being collected; for the sample app above, create the following index patterns
55 | - catalina-*
56 | - access-*
57 |
--------------------------------------------------------------------------------
/docs/guide/metallb.md:
--------------------------------------------------------------------------------
1 | # metallb network load balancing
2 |
3 | This document is outdated; the content below is for background only. For installation, follow the latest official documentation.
4 |
5 | `Metallb` is a tool that implements a `Kubernetes Load-balancer` on your own hardware (i.e. outside public clouds), open-sourced by a `google` team and well worth a look. Project [github home](https://github.com/google/metallb).
6 |
7 | ## metallb overview
8 |
9 | Here is a short description of how it works; for details see the [metallb website](https://metallb.universe.tf/), whose docs are very concise and clear. Current usage limitations:
10 |
11 | - requires `Kubernetes v1.9.0` or later; `ipvs` mode is not yet supported
12 | - supported network plugins: flannel/weave/romana; calico is partially supported
13 | - two modes, `layer2` and `bgp`; `bgp` mode requires external network devices that speak the `bgp` protocol
14 |
15 | `metallb` provides two main functions: address allocation and external announcement (see the sketch after this list)
16 |
17 | - Address allocation: request an IP range from your network administrator. In layer2 mode the range must be in the same subnet (same L2 segment) as the nodes; bgp mode has no such restriction.
18 | - Announcement: in layer2 mode it uses ARP to announce an extra loadbalancer IP on a node's MAC address (multiple IPs per MAC); in bgp mode nodes establish bgp peering with external network devices and advertise the loadbalancer address range to the outside network.
19 |
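As a concrete illustration of layer2 address allocation, below is a minimal sketch using the legacy (pre-CRD) ConfigMap format; the address range is illustrative, and newer metallb releases configure pools through CRDs instead, so follow the official docs for real installs.

``` bash
# a minimal layer2 address pool (legacy ConfigMap format; the range is an example)
kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.1.240-192.168.1.250
EOF
```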
20 |
--------------------------------------------------------------------------------
/docs/mixes/DoneList.md:
--------------------------------------------------------------------------------
1 | ## 前言
2 |
3 | `kubeasz`项目开始于`2017.11`,半年多时间以来,从最开始单一的ansible部署脚本朝着提供部署高可用 K8S集群的完整解决方案的目标不断前进,接下去项目的发展需要各位的共同参与和贡献,希望越做越好,为国内k8s学习、实践者提供更多帮助。
4 |
5 | ### 项目已完成部分
6 |
7 |
8 |
9 | 类型 |
10 | 描述 |
11 | 备注 |
12 |
13 |
14 | 集群部署 |
15 | 服务器基础安全加固与参数优化 |
16 | 已完成 |
17 |
18 |
19 | 基础服务 |
20 | 集群监控告警-prometheus |
21 | 已完成基础,待优化 |
22 |
23 |
24 | 应用服务 |
25 | jenkins集成 |
26 | 已完成 |
27 |
28 |
29 | 集群部署 |
30 | kube-router网络插件 |
31 | 已完成 |
32 |
33 |
34 | 基础服务 |
35 | metrics server |
36 | 已完成 |
37 |
38 |
39 | 集群部署 |
40 | ipvs代理模式跟进 |
41 | 已完成 |
42 |
43 |
44 | 集群部署 |
45 | cilium网络插件 |
46 | 已完成 |
47 |
48 |
49 | 集群部署 |
50 | 集群内时间同步-Chrony |
51 | 已完成 |
52 |
53 |
54 |
55 |
56 |
--------------------------------------------------------------------------------
/docs/mixes/HowToContribute.md:
--------------------------------------------------------------------------------
1 | # Submitting a `pull request` to `kubeasz`
2 |
3 | First check that the user name and email in your local git config match the user and email registered on github; otherwise, even if the `pull request` is accepted, your name will not show up in the contributor list. Set them with:
4 |
5 | ``` bash
6 | $ git config --global user.email "you@example.com"
7 | $ git config --global user.name "Your Name"
8 | ```
9 |
10 | - 1. Log in to github and click `fork` on this project's page to copy it into your own account
11 | - 2. Clone your fork locally: `git clone https://github.com/xxx/kubeasz.git`
12 | - 3. On the master branch, add the original repo as the upstream remote: `git remote add upstream https://github.com/easzlab/kubeasz.git`
13 | - 4. Create a local development branch: `git checkout -b dev`
14 | - 5. Change the code on the dev branch and commit: `git add .`, `git commit -am 'describe your change'`
15 | - 6. Switch to master and sync with the original repo: `git checkout master`, `git pull upstream master`
16 | - 7. Switch back to dev and merge the local master (now in sync with upstream); you may need to resolve conflicts: `git checkout dev`, `git merge master`
17 | - 8. Push the local dev branch to your own remote dev branch: `git push origin dev`
18 | - 9. On your fork's github page, click `Compare & pull request` to open a pull request against the original repo
19 | - a. Wait for the maintainer's response (accept/reject); the whole flow is collected in the sketch after this list
20 |
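The same workflow as a single shell sketch (replace `xxx` with your own github account and the commit message with your own):

``` bash
# one-time identity setup
git config --global user.email "you@example.com"
git config --global user.name "Your Name"

# clone your fork and add the original repo as upstream
git clone https://github.com/xxx/kubeasz.git
cd kubeasz
git remote add upstream https://github.com/easzlab/kubeasz.git

# develop on a branch, keep master in sync with upstream, then merge and push
git checkout -b dev
git add . && git commit -am 'describe your change'
git checkout master && git pull upstream master
git checkout dev && git merge master
git push origin dev
# finally open the pull request from the github web UI
```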
--------------------------------------------------------------------------------
/docs/mixes/donate.md:
--------------------------------------------------------------------------------
1 | # Donations
2 |
3 | If this project has helped you, please give the author a small token of encouragement. Thank you!
4 |
5 | Alipay QR code (left) and WeChat wallet QR code (right)
6 |
7 |  
8 |
9 |
--------------------------------------------------------------------------------
/docs/op/ch_apiserver_cert.md:
--------------------------------------------------------------------------------
1 | # Changing the APISERVER (MASTER) certificate
2 |
3 | After `kubeasz` creates a cluster, the APISERVER (MASTER) certificate by default covers the following `domains` and `IPs` in its hosts (SAN) list; see `roles/kube-master/templates/kubernetes-csr.json.j2`
4 |
5 | ```
6 | "hosts": [
7 | "127.0.0.1",
8 | {% if groups['ex_lb']|length > 0 %}
9 | "{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}",
10 | {% endif %}
11 | {% for host in groups['kube_master'] %}
12 | "{{ host }}",
13 | {% endfor %}
14 | "{{ CLUSTER_KUBERNETES_SVC_IP }}",
15 | {% for host in MASTER_CERT_HOSTS %}
16 | "{{ host }}",
17 | {% endfor %}
18 | "kubernetes",
19 | "kubernetes.default",
20 | "kubernetes.default.svc",
21 | "kubernetes.default.svc.cluster",
22 | "kubernetes.default.svc.cluster.local"
23 | ],
24 | ```
25 |
26 | Sometimes (for example when the apiserver is reached through a public IP NATed at the border firewall, or a public domain name needs to be added) we need to add extra `domains` or `IPs` to the APISERVER (MASTER) certificate. That is easy to do:
27 |
28 | ## 1. Edit the config file `/etc/kubeasz/clusters/${cluster_name}/config.yml`
29 |
30 | ``` bash
31 | # master node certificate config for the k8s cluster; multiple IPs and domains can be added (e.g. a public IP and domain)
32 | MASTER_CERT_HOSTS:
33 | - "10.1.1.1"
34 | - "k8s.test.io"
35 | #- "www.test.com"
36 | ```
37 |
38 | ## 2. Generate the new certificate and restart the apiserver
39 |
40 | ``` bash
41 | $ ezctl setup ${cluster_name} 04 -t change_cert,restart_master
42 | ```
43 |
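To confirm the new entries made it into the serving certificate, something like this can be run on a master node (a sketch; the certificate path `/etc/kubernetes/ssl/kubernetes.pem` is an assumption, adjust to where your cluster keeps it):

``` bash
# print the SAN list of the apiserver certificate and look for the added domain/IP
openssl x509 -in /etc/kubernetes/ssl/kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'
```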
--------------------------------------------------------------------------------
/docs/op/force_ch_certs.md:
--------------------------------------------------------------------------------
1 | # Forcibly renewing the CA and all certificates
2 |
3 | - WARNING: use this command with great care and make sure you understand what it does and its possible consequences. When executed it recreates the cluster CA certificate and every certificate issued by it. It is mainly intended for the case where the cluster admin.conf has leaked: recreating the CA invalidates the leaked admin.conf and prevents unauthorized access to the cluster.
4 |
5 | - If you need to hand out restricted kubeconfigs, it is strongly recommended to use a [kubeconfig with custom permissions and lifetime](kcfg-adm.md)
6 |
7 | ## Usage
8 |
9 | Once you have confirmed the forced renewal is needed, run the following on the ansible control node (xxx is the name of the cluster to operate on):
10 |
11 | ``` bash
12 | docker exec -it kubeasz ezctl kca-renew xxx
13 | # or: dk ezctl kca-renew xxx
14 | ```
15 |
16 | The command then performs the following steps in order; see `playbooks/96.update-certs.yml` for details
17 |
18 | - regenerate the CA certificate and all kubeconfigs
19 | - issue a new etcd certificate and restart the etcd service with it
20 | - issue a new kube-apiserver certificate and restart the kube-apiserver/kube-controller-manager/kube-scheduler services
21 | - issue new kubelet certificates and restart the kubelet/kube-proxy services
22 | - restart the network plugin pods
23 | - restart the other cluster component pods
24 |
25 | - **Note:** any workload pods in the cluster that access the apiserver must also be restarted
26 |
27 | ## Verification
28 |
29 | After the renewal, check the cluster component logs and pod logs to confirm the cluster is healthy; a small sketch follows the list below
30 |
31 | - component logs: use `journalctl -u xxxx.service -f` to check etcd.service/kube-apiserver.service/kube-controller-manager.service/kube-scheduler.service/kubelet.service/kube-proxy.service in turn
32 | - pod logs: use `kubectl logs` to check container logs
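A minimal verification sketch based on the checks above (unit names are the standard kubeasz ones; the pod name is a placeholder):

``` bash
# show recent logs of each control-plane and node service in turn
for svc in etcd kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy; do
  journalctl -u ${svc}.service -n 20 --no-pager
done

# spot-check pod logs, e.g. the network plugin pods in kube-system
kubectl get pod -n kube-system
kubectl logs -n kube-system <pod-name>
```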
33 |
--------------------------------------------------------------------------------
/docs/op/op-etcd.md:
--------------------------------------------------------------------------------
1 | # Managing the etcd cluster
2 |
3 | An etcd cluster supports changing its membership online: members can be added, modified, and removed. Membership changes must still satisfy the quorum rule, and keep in mind how changing the member count affects the cluster:
4 |
5 | - Note: if the etcd cluster has a failed node, remove the failed node first and only then add the new one, [see the FAQ](https://etcd.io/docs/v3.4.0/faq/)
6 | - adding etcd members improves cluster stability
7 | - adding etcd members improves read performance (all members hold the same data, so clients can read from any of them)
8 | - adding etcd members lowers write performance (all members hold the same data, so every write has to be replicated to all of them)
9 |
10 | ## Backing up etcd data
11 |
12 | 1. Manual backup on any healthy etcd node:
13 |
14 | ``` bash
15 | # take a snapshot backup
16 | $ ETCDCTL_API=3 etcdctl snapshot save backup.db
17 | # inspect the backup
18 | $ ETCDCTL_API=3 etcdctl --write-out=table snapshot status backup.db
19 | ```
20 |
21 | 2. Backup via kubeasz
22 | with _cluster_name_ k8s-01
23 |
24 | ``` bash
25 | ezctl backup k8s-01
26 | ```
27 |
28 | Example crontab entry for scheduled backups (containerized kubeasz, backing up daily at 01:01)
29 | ```
30 | 1 1 * * * /usr/bin/docker exec -i kubeasz ezctl backup k8s-01
31 | ```
32 |
33 | Backups are written to
34 |
35 | ```
36 | {{ base_dir }}/clusters/k8s-01/backup
37 | ```
38 |
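The matching restore flow is implemented by `playbooks/95.restore.yml` (see also the cluster backup and restore doc); a sketch of restoring cluster k8s-01 with containerized kubeasz, assuming the `restore` subcommand mirrors `backup` above:

``` bash
docker exec -it kubeasz ezctl restore k8s-01
```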
39 | ## etcd cluster node operations
40 |
41 | Run the following (assuming the node to operate on is 192.168.1.11 and the cluster name is test-k8s):
42 |
43 | - Add an etcd node:
44 |
45 | ``` bash
46 | # enable passwordless ssh login
47 | $ ssh-copy-id 192.168.1.11
48 |
49 | # add the node
50 | $ ezctl add-etcd test-k8s 192.168.1.11
51 | ```
52 |
53 | - Remove an etcd node: `$ ezctl del-etcd test-k8s 192.168.1.11`
54 |
55 | For the detailed flow, see the add-etcd/del-etcd functions in ezctl and the playbooks in the playbooks/ directory
56 |
57 | ### Verifying the etcd cluster
58 |
59 | ``` bash
60 | # log in to any etcd node and check the etcd cluster status
61 | $ export ETCDCTL_API=3
62 | $ etcdctl member list
63 |
64 | # check the service status and logs on every etcd node
65 | $ systemctl status etcd
66 | $ journalctl -u etcd -f
67 | ```
68 |
69 | ## References
70 |
71 | - official docs: https://etcd.io/docs/v3.5/op-guide/runtime-configuration/
72 |
--------------------------------------------------------------------------------
/docs/op/op-index.md:
--------------------------------------------------------------------------------
1 | # Cluster operations guide
2 |
3 | - [Managing NODE nodes](op-node.md)
4 | - [Managing MASTER nodes](op-master.md)
5 | - [Managing ETCD nodes](op-etcd.md)
6 | - [Upgrading the K8S version](upgrade.md)
7 | - [Cluster backup and restore](cluster_restore.md)
8 | - [Managing and distributing user kubeconfigs](kcfg-adm.md)
9 | - [Changing the APISERVER certificate](ch_apiserver_cert.md)
10 | - [Forcibly renewing the CA and all certificates](force_ch_certs.md)
11 | - [Configuring load forwarding for ingress nodeport](loadballance_ingress_nodeport.md)
12 |
--------------------------------------------------------------------------------
/docs/op/op-node.md:
--------------------------------------------------------------------------------
1 | # Managing node(s)
2 |
3 | Contents
4 | - 1. Adding a kube_node node
5 | - 2. Adding a node with a non-standard ssh port
6 | - 3. Removing a kube_node node
7 |
8 | ## 1. Adding a kube_node node
9 |
10 | The rough flow for adding a `kube_node` node (see the add-node function in ezctl and playbooks/22.addnode.yml):
11 | - [optional] install chrony time sync on the new node
12 | - run prepare on the new node
13 | - install the container runtime on the new node
14 | - install the kube_node services on the new node
15 | - install the network plugin bits on the new node
16 |
17 | ### Steps
18 |
19 | Run the following (assuming the node to add is 192.168.1.11 and the k8s cluster name is test-k8s):
20 |
21 | ``` bash
22 | # enable passwordless ssh login
23 | $ ssh-copy-id 192.168.1.11
24 |
25 | # add the node
26 | $ ezctl add-node test-k8s 192.168.1.11
27 |
28 | # likewise, repeat the steps above to add another node with a custom nodename
29 | $ ezctl add-node test-k8s 192.168.1.12 k8s_nodename=worker-03
30 | ```
31 |
32 | ### Verification
33 |
34 | ``` bash
35 | # check the new node's status
36 | $ kubectl get node
37 |
38 | # check the pod status of the calico or flannel network plugin on the new node
39 | $ kubectl get pod -n kube-system
40 |
41 | # check that newly created pods can be scheduled onto the new node (omitted)
42 | ```
43 |
44 | ## 2. Adding a node with a non-standard ssh port
45 |
46 | Assume the node to add is 192.168.2.1 with ssh on port 10022; then run
47 |
48 | ``` bash
49 | $ ssh-copy-id -p 10022 192.168.2.1
50 | $ ezctl add-node test-k8s 192.168.2.1 ansible_ssh_port=10022
51 | ```
52 |
53 | - Note: if other per-node variables need to be set when adding a node, append them in the same way
54 |
55 |
56 | ## 3. Removing a kube_node node
57 |
58 | The flow for removing a node (see the del-node function in ezctl and playbooks/32.delnode.yml):
59 | - check whether the node can be removed
60 | - migrate the pods running on the node
61 | - remove the node's services and files
62 | - remove the node from the cluster
63 |
64 | ### Steps
65 |
66 | ``` bash
67 | $ ezctl del-node test-k8s 192.168.1.11 # assuming the node to remove is 192.168.1.11
68 | ```
69 |
70 | ### Verification
71 |
72 | Omitted
73 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.5.0.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.5.0 (Winter Solstice)
2 |
3 | "Heaven's seasons and human affairs press on each other; with the winter solstice the yang is born and spring returns." kubeasz 3.5.0 is released with support for k8s v1.26, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.26.0
8 | - calico: v3.23.5
9 | - cilium: v1.12.4
10 | - dashboard: v2.7.0
11 | - pause: 3.9
12 | - harbor: v2.1.5
13 | - k8s-dns-node-cache: 1.22.13
14 |
15 | ### Adjusted branch update rules
16 |
17 | Each major k8s version maps to a specific major kubeasz version; see the version table in README.md. The actively updated branches are:
18 |
19 | - v3.4: for k8s v1.25, still maintained; important commits from later versions are merged via cherry-pick
20 | - v3.5: for k8s v1.26, actively maintained
21 | - master: kept in sync with the latest release branch, currently v3.5
22 |
23 | ### Other
24 |
25 | - ignore errors when cloud-init related files do not exist (#1206) by itswl
26 | - customizable harbor install path (#1209) by itswl
27 | - adjust how the in-cluster api service address and dns service address are set
28 | - change the nodelocaldns upstream dns configuration (#1210) by itswl
29 | - add a check that the harbor port is reachable (#1211) by itswl
30 | - adjust completion auto-completion (#1213) by itswl
31 | - add a check that the ex-lb kube-apiserver is healthy (#1215) by itswl
32 | - fix containerd failing to pull images from harbor
34 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.5.1.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.5.1
2 |
3 | kubeasz 3.5.1 is released: a k8s v1.26 patch update, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.26.1
8 | - calico: v3.24.5
9 | - chrony: 4.3
10 | - containerd: v1.6.14
11 | - docker: 20.10.22
12 | - keepalived: 2.2.7
13 | - nginx: 1.22.1
14 | - harbor: v2.6.3
15 |
16 | ### Support for setting the k8s nodename
17 |
18 | By default kubeasz uses a node's ip address as its nodename; to customize it, two methods are supported:
19 |
20 | - 1. Configure it directly in clusters/xxxx/hosts, for example
21 |
22 | ```
23 | # work node(s), set unique 'k8s_nodename' for each node
24 | [kube_node]
25 | 192.168.0.80 k8s_nodename=worker-01
26 | 192.168.0.79 k8s_nodename=worker-02
27 | ```
28 |
29 | - 2. Set it when adding a node, for example
30 |
31 | ```
32 | dk ezctl add-node xxxx 192.168.0.81 k8s_nodename=worker-03
33 | ```
34 |
35 | Note the k8s_nodename naming rules: only lower case letters, digits, '-' and '.' are allowed, and it must start and end with a letter or digit
36 | 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com')
37 |
38 |
39 | ### Other
40 |
41 | - update the etcd docs (#1218) by itswl
42 | - fix: start/stop scripts for ex-lb
43 | - fix: 'ezctl'-ignore /usr/bin/python link existed warning
44 | - update the HA architecture diagram and core install docs
45 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.5.2.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.5.2
2 |
3 | kubeasz 3.5.2 is released, fixing the k8s_nodename bug in 3.5.1 plus some other fixes.
4 |
5 | ### Support for setting the k8s nodename
6 |
7 | Fixes ISSUE #1225, thanks to surel9
8 |
9 | By default kubeasz uses a node's ip address as its nodename; to customize it, two methods are supported:
10 |
11 | - 1. Configure it directly in clusters/xxxx/hosts, for example
12 |
13 | ```
14 | # work node(s), set unique 'k8s_nodename' for each node
15 | [kube_node]
16 | 192.168.0.80 k8s_nodename=worker-01
17 | 192.168.0.79 k8s_nodename=worker-02
18 | ```
19 |
20 | - 2. Set it when adding a node, for example
21 |
22 | ```
23 | dk ezctl add-node xxxx 192.168.0.81 k8s_nodename=worker-03
24 | ```
25 |
26 | Note the k8s_nodename naming rules: only lower case letters, digits, '-' and '.' are allowed, and it must start and end with a letter or digit
27 | 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com')
28 |
29 |
30 | ### Other
31 |
32 | - fix: errors when installing system packages in the prepare phase should not be ignored
33 | - fix: the calico directory /var/run/calico could not be removed when cleaning a node
34 | - fix: pods scheduled onto the deploy machine could not communicate, issue #1224, thanks to bogeit
35 |
36 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.5.3.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.5.3
2 |
3 | kubeasz 3.5.3 is released, with component version updates and a fix for etcd cluster restore.
4 |
5 | ### Component updates
6 |
7 | - k8s: v1.26.4
8 | - etcd: v3.5.6
9 | - containerd: 1.6.20
10 | - runc: v1.1.5
11 | - cni: v1.2.0
12 | - crictl: v1.26.1
13 | - helm: v3.11.2
14 | - ansible-core: v2.14.4
15 |
16 | ### Cluster restore fix
17 |
18 | PR #1193 introduced a cluster restore bug: when restoring a multi-node etcd cluster, every node elected itself as the leader.
19 |
20 | This is now fixed; thanks to zhangshijle for the report and the detailed test results.
21 |
22 | ### Other
23 |
24 | - adjust: cni download and runtime directories, to avoid issue #1248
25 | - adjust: the containerd runtime directory
26 | - fix: the container image download script for offline installs
27 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.6.1.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.6.1
2 |
3 | kubeasz 3.6.1 is released: support for k8s v1.27, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.27.2
8 | - calico: v3.24.6
9 | - kube-ovn: v1.11.5
10 | - kube-router: v1.5.4
11 |
12 | ### New application deployment add-on: kubeapps
13 |
14 | Kubeapps is a web-based application that installs on a Kubernetes cluster in one step and lets users deploy, manage, and upgrade
15 | applications. https://github.com/easzlab/kubeasz/blob/master/docs/guide/kubeapps.md
16 |
17 | ### Major updates
18 |
19 | - rewrite the `ezdown` script to support downloading extra application container images
20 | - add the `local-path-provisioner` local directory provisioner
21 | - allow kubelet to pull container images in parallel
22 |
23 | ### Other
24 |
25 | - add the kubectl-node-shell script
26 | - fix the non-deterministic python interpreter when ansible connects locally
27 | - fix typo #1273
28 | - assorted documentation updates
29 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.6.2.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.6.2
2 |
3 | kubeasz 3.6.2 is released: support for k8s v1.28, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.28.1
8 | - etcd: v3.5.9
9 | - containerd: 1.6.23
10 | - runc: v1.1.9
11 | - cni: v1.3.0
12 | - coredns: 1.11.1
13 | - cilium: 1.13.6
14 | - flannel: v0.22.2
15 |
16 | ### Changed how kubeasz versions map to k8s versions
17 |
18 | Previously each major k8s version had a dedicated recommended kubeasz version. That fragmented kubeasz releases, made issues harder to track, and hurt the install experience for ordinary users. Starting with kubeasz 3.6.2, the latest kubeasz release supports installing the three most recent major k8s versions. Installation notes:
19 |
20 | (if there are already kube* files under /etc/kubeasz/bin, delete them first: rm -f /etc/kubeasz/bin/kube*)
21 |
22 | - to install k8s v1.28: use kubeasz 3.6.2 and run ./ezdown -D (default download)
23 | - to install k8s v1.27: use kubeasz 3.6.2 and run ./ezdown -D -k v1.27.5
24 | - to install k8s v1.26: use kubeasz 3.6.2 and run ./ezdown -D -k v1.26.8
25 | - to install k8s v1.25: use kubeasz 3.6.2 and run ./ezdown -D -k v1.25.13
26 | - to install k8s v1.24: use kubeasz 3.6.2 and run ./ezdown -D -k v1.24.17
27 |
28 |
29 | ### Major updates
30 |
31 | - add support for configurable trusted insecure registries in containerd
32 | - fix node settings for calico rr mode #1308
33 | - fix the /etc/hosts approach for custom node names
34 | - fix: kubelet failed when enabling kubeReserved or systemReserved
35 |
36 | ### Other
37 |
38 | - fix: disable selinux on deploy host
39 | - add a registry mirror accessible from China for the helm redis-ha deployment, by heyanyanchina123
40 | - fix: with multiple clusters, the upgrade failed if the current ezctl context was not the cluster being upgraded, by learn0208
41 | - add: enable strictARP in the ipvs configuration #1298
42 | - revert for supporting k8s version <= 1.26
43 | - add kubetail, by WeiLai
44 | - update manifests:es-cluster/mysql-cluster
45 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.6.3.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.6.3
2 |
3 | kubeasz 3.6.3 is released: support for k8s v1.29, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.29.0
8 | - etcd: v3.5.10
9 | - containerd: 1.6.26
10 | - runc: v1.1.10
11 | - calico: v3.26.4
12 | - cilium: 1.14.5
13 |
14 | ### Changed how kubeasz versions map to k8s versions
15 |
16 | Previously each major k8s version had a dedicated recommended kubeasz version. That fragmented kubeasz releases, made issues harder to track, and hurt the install experience for ordinary users. Starting with kubeasz 3.6.2, the latest kubeasz release supports installing the three most recent major k8s versions. Installation notes:
17 |
18 | (if there are already kube* files under /etc/kubeasz/bin, delete them first: rm -f /etc/kubeasz/bin/kube*)
19 |
20 | - to install k8s v1.29: use kubeasz 3.6.3 and run ./ezdown -D (default download)
21 | - to install k8s v1.28: use kubeasz 3.6.2 and run ./ezdown -D -k v1.28.5
22 | - to install k8s v1.27: use kubeasz 3.6.2 and run ./ezdown -D -k v1.27.9
23 | - to install k8s v1.26: use kubeasz 3.6.2 and run ./ezdown -D -k v1.26.12
24 |
25 | ### Major updates
26 |
27 | - deprecated role: os-harden; with more linux distributions supported, the hardening cannot be adequately tested on all of them, thanks to issue #1338 for the report
28 | - adjust docker setup scripts
29 | - update harbor v2.8.4 and fix harbor setup
30 | - fix nodelocaldns yaml
31 |
32 | ### Other
33 |
34 | - docs update: add argocd guide
35 | - docs: fix the quickStart.md url in network-plugin
36 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.6.4.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.6.4
2 |
3 | kubeasz 3.6.4 is released: support for k8s v1.30, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.30.1
8 | - etcd: v3.5.12
9 | - containerd: 1.7.17
10 | - runc: v1.1.12
11 | - calico: v3.26.4
12 | - cilium: 1.15.5
13 | - cni: v1.4.1
14 | - harbor: v2.10.2
15 | - metrics-server: v0.7.1
16 |
17 | ### Major updates
18 |
19 | - security: update containerd and runc to address CVE-2024-21626
20 | - install flow: the 'prepare' role now sets the hostname, so calico no longer breaks when hosts share the same hostname; a new `ENABLE_SETTING_HOSTNAME` switch was added to example/config.yml
21 | - operating systems: add tested support for Ubuntu 24.04
22 | - known issue: the calico v3.26.4 network plugin is incompatible with Ubuntu 24.04, failing with: ipset v7.11: Kernel and userspace incompatible
23 | - the cilium plugin works fine
24 |
25 | ### Other
26 |
27 | - 21376465de7f44d1ec997bde096afc7404ce45c5 fix: cilium ui images settings
28 | - c40548e0e33cab3c4e5742aacce11101ac0c7366 #1343, restore the podPidsLimit=-1 default
30 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.6.5.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.6.5
2 |
3 | kubeasz 3.6.5 is released: support for k8s v1.31, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.31.2
8 | - etcd: v3.5.16
9 | - containerd: 1.7.23
10 | - runc: v1.1.15
11 | - calico: v3.28.2
12 | - coredns: 1.11.3
13 | - dnsnodecache: 1.23.1
14 | - cilium: 1.16.3
15 | - flannel: v0.26.0
16 | - cni: v1.6.0
17 | - harbor: v2.11.1
18 | - metrics-server: v0.7.2
19 | - pause: 3.10
20 |
21 | ### Updates
22 |
23 | - fix the prepare script on CentOS 9 #1397 by GitHubAwan
24 | - style: trim trailing whitespace & add logger source line number #1413 by kelein
25 | - operating systems: add tested support for Ubuntu 24.04
26 | - fix the calico ipset compatibility issue on Ubuntu 24.04 (calico v3.28.2)
27 |
28 | ### Other
29 |
30 | - fix the calico hostname setting
31 | - update assorted docs
33 |
--------------------------------------------------------------------------------
/docs/release-notes/kubeasz-3.6.6.md:
--------------------------------------------------------------------------------
1 | ## kubeasz 3.6.6
2 |
3 | kubeasz 3.6.6 is released: support for k8s v1.32, component updates, and some bugfixes.
4 |
5 | ### Version updates
6 |
7 | - k8s: v1.32.3
8 | - etcd: v3.5.20
9 | - containerd: 2.0.4
10 | - runc: v1.2.6
11 | - calico: v3.28.3
12 | - coredns: 1.11.4
13 | - cni: v1.6.2
14 | - harbor: v2.12.2
15 |
16 | ### Updates
17 |
18 | - update the docker registry mirror settings for China so the ezdown script can download images again; update the containerd registry mirror settings to match
19 | - major component upgrades: containerd moves from 1.7.x to 2.0.x with its main config files updated; runc moves from 1.1.x to 1.2.x
20 | - install logic: newly added nodes no longer re-run the network plugin install, avoiding some plugins restarting workload pods, by gogeof
21 | - install logic: containerd is reinstalled on every script run, regardless of whether it was already running
22 | - improve how the ezctl script loads variables from ezdown, by RadPaperDinosaur
23 |
24 |
25 | ### Other
26 |
27 | - fix the CLUSTER_DNS_SVC_IP & CLUSTER_KUBERNETES_SVC_IP address generation rule, by yunpiao
28 | - update the conformance doc
30 |
--------------------------------------------------------------------------------
/docs/setup/07-install_cluster_addon.md:
--------------------------------------------------------------------------------
1 | # 07 - Installing the main cluster add-ons
2 |
3 | A selection of common and essential add-ons is currently integrated into the install scripts:
4 |
5 | ## Installed by default
6 |
7 | - [coredns](../guide/kubedns.md)
8 | - [nodelocaldns](../guide/kubedns.md)
9 | - [metrics-server](../guide/metrics-server.md)
10 | - [dashboard](../guide/dashboard.md)
11 |
12 | kubeasz installs the basic add-ons above by default, and supports offline installation (./ezdown -D automatically downloads the component images and pushes them to the local registry easzlab.io.local:5000)
13 |
14 | ## Optional add-ons
15 |
16 | - [prometheus](../guide/prometheus.md)
17 | - [network_check](network-plugin/network-check.md)
18 | - [nfs_provisioner]()
19 |
20 | kubeasz does not install the add-ons above by default; enable them in the config file (clusters/xxx/config.yml). Offline installation is supported (./ezdown -X additionally downloads these component images and pushes them to the local registry easzlab.io.local:5000). A short sketch follows below.
21 |
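A minimal sketch of the optional flow, assuming the cluster is named `xxx` and that step `07` maps to playbooks/07.cluster-addon.yml as in ezctl (everything beyond `./ezdown -X` is an assumption about your setup):

``` bash
# download the optional add-on images and push them to the local registry
./ezdown -X

# enable the wanted add-ons in clusters/xxx/config.yml, then re-run the cluster-addon step
ezctl setup xxx 07
```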
22 | ## Install scripts
23 |
24 | See the `roles/cluster-addon/` directory
25 |
26 | - 1. generate kubedns.yaml and coredns.yaml from the `CLUSTER_DNS_SVC_IP`, `CLUSTER_DNS_DOMAIN` and other parameters configured in the hosts file
27 | - 2. register the pod_info variable, used to determine which add-ons are already running in the cluster
28 | - 3. based on pod_info and the `config switches`, install or skip each add-on in turn
29 |
30 | ## Next
31 |
32 | - [Create the ex_lb node group](ex-lb.md) to provide a highly available apiserver to clients outside the cluster
33 | - [Create cluster persistent storage](08-cluster-storage.md)
34 |
--------------------------------------------------------------------------------
/docs/setup/config_guide.md:
--------------------------------------------------------------------------------
1 | # Customizing cluster parameters
2 |
3 | `kubeasz` cluster creation is configured in two main places (assuming the cluster is named xxxx):
4 |
5 | - clusters/xxxx/hosts (template at example/hosts.multi-node): cluster node definitions, the main parameters, and global variables
6 | - clusters/xxxx/config.yml (template at example/config.yml): other parameters and extra per-component options
7 |
8 | ## clusters/xxxx/hosts (ansible hosts)
9 |
10 | As described in the [planning and overall intro](00-planning_and_overall_intro.md), this mainly covers the cluster node definitions and cluster-wide parameters
11 |
12 | - keep the configuration simple and flexible
13 | - keep the configuration items stable
14 |
15 | Common settings (see the sketch after this list):
16 |
17 | - change the container runtime: CONTAINER_RUNTIME="containerd"
18 | - change the cluster network plugin: CLUSTER_NETWORK="calico"
19 | - change the pod network CIDR: CLUSTER_CIDR="192.168.0.0/16"
20 | - change the NodePort range: NODE_PORT_RANGE="30000-32767"
21 |
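For example, the corresponding lines in clusters/xxxx/hosts would look like this (a sketch; the surrounding section layout follows example/hosts.multi-node):

``` bash
# container runtime, network plugin, pod CIDR and NodePort range in clusters/xxxx/hosts
CONTAINER_RUNTIME="containerd"
CLUSTER_NETWORK="calico"
CLUSTER_CIDR="192.168.0.0/16"
NODE_PORT_RANGE="30000-32767"
```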
22 | ## clusters/xxxx/config.yml
23 |
24 | This mainly holds per-component customization; the set of options for individual components may keep growing. A cluster can be created with the defaults without changing anything here
25 |
26 | Configure the k8s cluster to your actual needs; common examples:
27 |
28 | - use offline system packages: INSTALL_SOURCE: "offline" (requires ezdown -P to download the offline system packages first)
29 | - configure the CA certificate and the validity period of the certificates it issues
30 | - let the apiserver serve public domain names: MASTER_CERT_HOSTS
31 | - configure cluster-addon installation
32 | - ...
33 |
--------------------------------------------------------------------------------
/docs/setup/kubeasz_on_public_cloud.md:
--------------------------------------------------------------------------------
1 | # Deploying kubeasz on public clouds
2 |
3 | When using `kubeasz` to deploy a `k8s` cluster on a public cloud, pay attention to the following common issues.
4 |
5 | ### Security groups
6 |
7 | Check the VM security group rules; in general it is enough to open all ports between the nodes inside the cluster;
8 |
9 | ### Network plugin
10 |
11 | Public clouds usually put many restrictions on the network, so cross-node pod traffic needs an OVERLAY encapsulation; the defaults are in example/config.yml
12 |
13 | - flannel in vxlan mode: `FLANNEL_BACKEND: "vxlan"`
14 | - calico with ipinip enabled: `CALICO_IPV4POOL_IPIP: "Always"`
15 | - kube-router with ipinip enabled: `OVERLAY_TYPE: "full"`
16 |
17 | ### Node internet access
18 |
19 | You can bind an `elastic public IP` (EIP) to each node during installation and unbind it after the cluster is installed; alternatively use a NAT gateway, or build your own egress gateway with iptables, etc.
20 |
21 | ### Load balancing
22 |
23 | Cloud providers generally do not allow self-built `keepalived+haproxy` load balancers; follow your provider's documentation and use their (internal) layer-4 TCP load balancer instead;
24 |
25 | - kubeasz 2x no longer depends on an external load balancer for apiserver high availability, see the [2x architecture](https://github.com/easzlab/kubeasz/blob/dev2/docs/setup/00-planning_and_overall_intro.md#ha-architecture)
26 | - kubeasz 1x and earlier need a load balancer for apiserver high availability, see the [1x architecture](https://github.com/easzlab/kubeasz/blob/dev1/docs/setup/00-planning_and_overall_intro.md#ha-architecture)
27 |
28 | ### Time synchronization
29 |
30 | VMs from cloud providers normally come with a time sync service preinstalled, so there is no need to install one yourself.
31 |
32 | ### Accessing the APISERVER
33 |
34 | After installing a cluster on a public cloud you may need to reach the apiserver over the internet, even though no public IP or domain was planned before the install; since the apiserver must be accessed over https, the public ip/domain has to be included in the certificate. See [changing the APISERVER (MASTER) certificate](../op/ch_apiserver_cert.md)
35 |
36 | ## Deploying a multi-master HA cluster on a public cloud
37 |
38 | Once the common issues above are handled, installing a cluster on a public cloud with kubeasz is no different from doing it in your own environment.
39 |
40 | - with kubeasz 2x, installing single-node, single-master multi-node, or multi-master multi-node k8s clusters gives exactly the same experience on and off the cloud
41 |
--------------------------------------------------------------------------------
/docs/setup/multi_os.md:
--------------------------------------------------------------------------------
1 | # Operating system notes
2 |
3 | On some linux distributions with newer kernels, k8s installs using cgroup v2 sometimes fail and require the cluster to be destroyed/cleaned and reinstalled. This has been reported on Alma Linux 9, Rocky Linux 9 and Fedora 37; the suggested steps are:
4 |
5 | - 1. Confirm the system is using cgroup v2
6 | ```
7 | stat -fc %T /sys/fs/cgroup/
8 | cgroup2fs
9 | ```
10 | - 2. On the first install kubelet may fail to start with an error like: err="openat2 /sys/fs/cgroup/kubepods.slice/cpu.weight: no such file or directory"
11 |
12 | - 3. Destroy the cluster and reinstall; this usually succeeds
13 | ```
14 | # destroy the cluster
15 | dk ezctl destroy xxxx
16 |
17 | # reboot
18 | reboot
19 |
20 | # reinstall after the reboot
21 | dk ezctl setup xxxx all
22 | ```
23 |
24 | ## Debian
25 |
26 | - Debian 11: iptables may not be installed by default; before installing with kubeasz, run:
27 |
28 | ``` bash
29 | apt update
30 |
31 | apt install iptables -y
32 | ```
33 |
34 | ## Alibaba
35 |
36 | - Alibaba Linux 3.2104 LTS: before installing, set the following:
37 |
38 | ``` bash
39 | # switch the prepare role to the dnf package manager
40 | sed -i 's/package/dnf/g' /etc/kubeasz/roles/prepare/tasks/redhat.yml
41 | ```
42 |
43 | ## openSUSE
44 |
45 | - openSUSE Leap 15.4: iptables needs to be installed
46 |
47 | ``` bash
48 | zypper install iptables
49 | ln -s /usr/sbin/iptables /sbin/iptables
50 | ```
51 |
--------------------------------------------------------------------------------
/docs/setup/multi_platform.md:
--------------------------------------------------------------------------------
1 | # Multi-architecture support
2 |
3 | kubeasz 3.4.1 and later support multiple CPU architectures; linux amd64 and linux arm64 are currently supported, and more architectures will be planned based on demand.
4 |
5 | ## Usage
6 |
7 | How kubeasz multi-arch installation works: based on the architecture of the deploy machine (the one running the ezdown/ezctl commands), it automatically downloads the matching amd64/arm64 binaries and container images, then pushes and installs them across the whole cluster.
8 |
9 | - machines of different architectures cannot yet join the same cluster.
10 | - harbor currently only supports amd64 installs
11 |
12 | ## Architecture support notes
13 |
14 | #### The core k8s components already ship multi-arch binaries/container images; the project adjusted the dockerfile of the container used to download those binaries
15 |
16 | - https://github.com/easzlab/dockerfile-kubeasz-k8s-bin
17 |
18 | #### For the other binaries and images kubeasz uses, the container build dockerfiles were reworked
19 |
20 | - https://github.com/easzlab/dockerfile-kubeasz-ext-bin
21 | - https://github.com/easzlab/dockerfile-kubeasz-ext-build
22 | - https://github.com/easzlab/dockerfile-kubeasz-sys-pkg
23 | - https://github.com/easzlab/dockerfile-kubeasz-mirrored-images
24 | - https://github.com/easzlab/dockerfile-kubeasz
25 | - https://github.com/easzlab/dockerfile-ansible
26 |
27 | #### Other components (coredns/network plugin/dashboard/metrics-server, etc.) generally provide multi-arch container images that can be pulled directly
28 |
29 |
30 |
--------------------------------------------------------------------------------
/manifests/efk/es-index-rotator/rotator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1beta1
2 | kind: CronJob
3 | metadata:
4 | name: es-index-rotator
5 | namespace: kube-system
6 | spec:
7 | # run at 01:03 every day
8 | schedule: "3 1 */1 * *"
9 | jobTemplate:
10 | spec:
11 | template:
12 | spec:
13 | containers:
14 | - name: es-index-rotator
15 | image: easzlab/es-index-rotator:0.2.1
16 | # keep the last 10 days of logs
17 | command:
18 | - /bin/rotate.sh
19 | - "10"
20 | - "logstash" # fluented 默认创建的index形如'logstash-2020.01.01'
21 | restartPolicy: OnFailure
22 | concurrencyPolicy: Forbid
23 | successfulJobsHistoryLimit: 2
24 | failedJobsHistoryLimit: 1
25 |
--------------------------------------------------------------------------------
/manifests/efk/es-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: elasticsearch-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: elasticsearch-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | kubernetes.io/name: "Elasticsearch"
11 | spec:
12 | ports:
13 | - port: 9200
14 | protocol: TCP
15 | targetPort: db
16 | clusterIP: None
17 | selector:
18 | k8s-app: elasticsearch-logging
19 |
--------------------------------------------------------------------------------
/manifests/efk/es-static-pv/es-pv0.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-es-0
5 | spec:
6 | capacity:
7 | storage: 4Gi
8 | accessModes:
9 | - ReadWriteMany
10 | volumeMode: Filesystem
11 | persistentVolumeReclaimPolicy: Recycle
12 | storageClassName: "es-storage-class"
13 | nfs:
14 | # change to match your actual shared directory
15 | path: /share/es0
16 | # change to your actual nfs server address
17 | server: 192.168.1.208
18 |
--------------------------------------------------------------------------------
/manifests/efk/es-static-pv/es-pv1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-es-1
5 | spec:
6 | capacity:
7 | storage: 4Gi
8 | accessModes:
9 | - ReadWriteMany
10 | volumeMode: Filesystem
11 | persistentVolumeReclaimPolicy: Recycle
12 | storageClassName: "es-storage-class"
13 | nfs:
14 | # change to match your actual shared directory
15 | path: /share/es1
16 | # change to your actual nfs server address
17 | server: 192.168.1.208
18 |
--------------------------------------------------------------------------------
/manifests/efk/es-static-pv/es-pv2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-es-2
5 | spec:
6 | capacity:
7 | storage: 4Gi
8 | accessModes:
9 | - ReadWriteMany
10 | volumeMode: Filesystem
11 | persistentVolumeReclaimPolicy: Recycle
12 | storageClassName: "es-storage-class"
13 | nfs:
14 | # change to match your actual shared directory
15 | path: /share/es2
16 | # change to your actual nfs server address
17 | server: 192.168.1.208
18 |
--------------------------------------------------------------------------------
/manifests/efk/kibana-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: kibana-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kibana-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | spec:
11 | replicas: 1
12 | selector:
13 | matchLabels:
14 | k8s-app: kibana-logging
15 | template:
16 | metadata:
17 | labels:
18 | k8s-app: kibana-logging
19 | annotations:
20 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
21 | spec:
22 | containers:
23 | - name: kibana-logging
24 | #image: docker.elastic.co/kibana/kibana-oss:6.6.1
25 | image: easzlab/kibana-oss:6.6.1
26 | resources:
27 | # need more cpu upon initialization, therefore burstable class
28 | limits:
29 | cpu: 1000m
30 | requests:
31 | cpu: 100m
32 | env:
33 | - name: ELASTICSEARCH_URL
34 | value: http://elasticsearch-logging:9200
35 | # if the kibana service is exposed via NodePort, use the commented-out lines below instead
36 | #- name: SERVER_BASEPATH
37 | # value: ""
38 | - name: SERVER_BASEPATH
39 | value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
40 | ports:
41 | - containerPort: 5601
42 | name: ui
43 | protocol: TCP
44 |
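45 | # access (assumption): with the proxy-based SERVER_BASEPATH above, run `kubectl proxy` and open
46 | #   http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/kibana-logging/proxy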
--------------------------------------------------------------------------------
/manifests/efk/kibana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kibana-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kibana-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | kubernetes.io/name: "Kibana"
11 | spec:
12 | ports:
13 | - port: 5601
14 | protocol: TCP
15 | targetPort: ui
16 | selector:
17 | k8s-app: kibana-logging
18 | #type: NodePort
19 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/.helmignore:
--------------------------------------------------------------------------------
1 | .git
2 | # OWNERS file for Kubernetes
3 | OWNERS
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/Chart.yaml:
--------------------------------------------------------------------------------
1 | name: elasticsearch
2 | home: https://www.elastic.co/products/elasticsearch
3 | version: 1.7.2
4 | appVersion: 6.4.0
5 | description: Flexible and powerful open source, distributed real-time search and analytics
6 | engine.
7 | icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg
8 | sources:
9 | - https://www.elastic.co/products/elasticsearch
10 | - https://github.com/jetstack/elasticsearch-pet
11 | - https://github.com/giantswarm/kubernetes-elastic-stack
12 | - https://github.com/GoogleCloudPlatform/elasticsearch-docker
13 | - https://github.com/clockworksoul/helm-elasticsearch
14 | - https://github.com/pires/kubernetes-elasticsearch-cluster
15 | maintainers:
16 | - name: simonswine
17 | email: christian@jetstack.io
18 | - name: icereval
19 | email: michael.haselton@gmail.com
20 | - name: rendhalver
21 | email: pete.brown@powerhrg.com
22 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/OWNERS:
--------------------------------------------------------------------------------
1 | approvers:
2 | - simonswine
3 | - icereval
4 | - rendhalver
5 | reviewers:
6 | - simonswine
7 | - icereval
8 | - rendhalver
9 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/templates/client-pdb.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.client.podDisruptionBudget.enabled }}
2 | apiVersion: policy/v1beta1
3 | kind: PodDisruptionBudget
4 | metadata:
5 | labels:
6 | app: {{ template "elasticsearch.name" . }}
7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
8 | component: "{{ .Values.client.name }}"
9 | heritage: {{ .Release.Service }}
10 | release: {{ .Release.Name }}
11 | name: {{ template "elasticsearch.client.fullname" . }}
12 | spec:
13 | {{- if .Values.client.podDisruptionBudget.minAvailable }}
14 | minAvailable: {{ .Values.client.podDisruptionBudget.minAvailable }}
15 | {{- end }}
16 | {{- if .Values.client.podDisruptionBudget.maxUnavailable }}
17 | maxUnavailable: {{ .Values.client.podDisruptionBudget.maxUnavailable }}
18 | {{- end }}
19 | selector:
20 | matchLabels:
21 | app: {{ template "elasticsearch.name" . }}
22 | component: "{{ .Values.client.name }}"
23 | release: {{ .Release.Name }}
24 | {{- end }}
25 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/templates/client-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: {{ template "elasticsearch.name" . }}
6 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
7 | component: "{{ .Values.client.name }}"
8 | heritage: {{ .Release.Service }}
9 | release: {{ .Release.Name }}
10 | name: {{ template "elasticsearch.client.fullname" . }}
11 | {{- if .Values.client.serviceAnnotations }}
12 | annotations:
13 | {{ toYaml .Values.client.serviceAnnotations | indent 4 }}
14 | {{- end }}
15 |
16 | spec:
17 | ports:
18 | - name: http
19 | port: 9200
20 | targetPort: 9200
21 | - name: tcp
22 | port: 9300
23 | targetPort: 9300
24 | selector:
25 | app: {{ template "elasticsearch.name" . }}
26 | component: "{{ .Values.client.name }}"
27 | release: {{ .Release.Name }}
28 | type: {{ .Values.client.serviceType }}
29 | {{- if .Values.client.loadBalancerIP }}
30 | loadBalancerIP: "{{ .Values.client.loadBalancerIP }}"
31 | {{- end }}
32 | {{if .Values.client.loadBalancerSourceRanges}}
33 | loadBalancerSourceRanges:
34 | {{range $rangeList := .Values.client.loadBalancerSourceRanges}}
35 | - {{ $rangeList }}
36 | {{end}}
37 | {{end}}
38 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/templates/data-pdb.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.data.podDisruptionBudget.enabled }}
2 | apiVersion: policy/v1beta1
3 | kind: PodDisruptionBudget
4 | metadata:
5 | labels:
6 | app: {{ template "elasticsearch.name" . }}
7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
8 | component: "{{ .Values.data.name }}"
9 | heritage: {{ .Release.Service }}
10 | release: {{ .Release.Name }}
11 | name: {{ template "elasticsearch.data.fullname" . }}
12 | spec:
13 | {{- if .Values.data.podDisruptionBudget.minAvailable }}
14 | minAvailable: {{ .Values.data.podDisruptionBudget.minAvailable }}
15 | {{- end }}
16 | {{- if .Values.data.podDisruptionBudget.maxUnavailable }}
17 | maxUnavailable: {{ .Values.data.podDisruptionBudget.maxUnavailable }}
18 | {{- end }}
19 | selector:
20 | matchLabels:
21 | app: {{ template "elasticsearch.name" . }}
22 | component: "{{ .Values.data.name }}"
23 | release: {{ .Release.Name }}
24 | {{- end }}
25 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/templates/master-pdb.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.master.podDisruptionBudget.enabled }}
2 | apiVersion: policy/v1beta1
3 | kind: PodDisruptionBudget
4 | metadata:
5 | labels:
6 | app: {{ template "elasticsearch.name" . }}
7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
8 | component: "{{ .Values.master.name }}"
9 | heritage: {{ .Release.Service }}
10 | release: {{ .Release.Name }}
11 | name: {{ template "elasticsearch.master.fullname" . }}
12 | spec:
13 | {{- if .Values.master.podDisruptionBudget.minAvailable }}
14 | minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }}
15 | {{- end }}
16 | {{- if .Values.master.podDisruptionBudget.maxUnavailable }}
17 | maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }}
18 | {{- end }}
19 | selector:
20 | matchLabels:
21 | app: {{ template "elasticsearch.name" . }}
22 | component: "{{ .Values.master.name }}"
23 | release: {{ .Release.Name }}
24 | {{- end }}
25 |
--------------------------------------------------------------------------------
/manifests/es-cluster/elasticsearch/templates/master-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: {{ template "elasticsearch.name" . }}
6 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
7 | component: "{{ .Values.master.name }}"
8 | heritage: {{ .Release.Service }}
9 | release: {{ .Release.Name }}
10 | name: {{ template "elasticsearch.fullname" . }}-discovery
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - port: 9300
15 | targetPort: transport
16 | selector:
17 | app: {{ template "elasticsearch.name" . }}
18 | component: "{{ .Values.master.name }}"
19 | release: {{ .Release.Name }}
20 |
--------------------------------------------------------------------------------
/manifests/es-cluster/es-values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | repository: "jmgao1983/elasticsearch"
3 |
4 | cluster:
5 | name: "es-on-k8s"
6 | env:
7 | MINIMUM_MASTER_NODES: "2"
8 |
9 | client:
10 | serviceType: NodePort
11 |
12 | master:
13 | name: master
14 | replicas: 3
15 | heapSize: "512m"
16 | persistence:
17 | enabled: true
18 | accessMode: ReadWriteOnce
19 | name: data
20 | size: "4Gi"
21 | storageClass: "nfs-es"
22 |
23 | data:
24 | name: data
25 | replicas: 2
26 | heapSize: "1536m"
27 | persistence:
28 | enabled: true
29 | accessMode: ReadWriteOnce
30 | name: data
31 | size: "40Gi"
32 | storageClass: "nfs-es"
33 | terminationGracePeriodSeconds: 3600
34 | resources:
35 | limits:
36 | cpu: "1"
37 | # memory: "2048Mi"
38 | requests:
39 | cpu: "25m"
40 | memory: "1536Mi"
41 | podDisruptionBudget:
42 | enabled: false
43 | # minAvailable: 1
44 | maxUnavailable: 1
45 |
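46 | # install sketch (assumption; the release name is arbitrary and the "nfs-es" storageClass must exist):
47 | #   helm install es-cluster -f es-values.yaml ./elasticsearch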
--------------------------------------------------------------------------------
/manifests/ingress/nginx-ingress/nginx-ingress-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ingress-nginx
5 | namespace: ingress-nginx
6 | labels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/part-of: ingress-nginx
9 | spec:
10 | type: NodePort
11 | ports:
12 | - name: http
13 | port: 80
14 | targetPort: 80
15 | protocol: TCP
16 | # NODE_PORT_RANGE set in the cluster hosts file defines the usable NodePort range
17 | # pick a free port from the default 20000-40000 range to expose the ingress-controller's HTTP endpoint externally
18 | nodePort: 23456
19 | - name: https
20 | port: 443
21 | targetPort: 443
22 | protocol: TCP
23 | # NODE_PORT_RANGE set in the cluster hosts file defines the usable NodePort range
24 | # pick a free port from the default 20000-40000 range to expose the ingress-controller's HTTPS endpoint externally
25 | nodePort: 23457
26 | - name: test-mysql
27 | port: 3306
28 | targetPort: 3306
29 | protocol: TCP
30 | nodePort: 23306
31 | - name: test-mysql-read
32 | port: 3307
33 | targetPort: 3307
34 | protocol: TCP
35 | nodePort: 23307
36 | - name: test-dns
37 | port: 53
38 | targetPort: 53
39 | protocol: UDP
40 | nodePort: 20053
41 | selector:
42 | app.kubernetes.io/name: ingress-nginx
43 | app.kubernetes.io/part-of: ingress-nginx
44 |
45 |
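46 | # quick check (assumption): once applied, HTTP ingress is reachable on any node IP at port 23456, e.g.
47 | #   curl -H 'Host: hello.test.com' http://<node-ip>:23456/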
--------------------------------------------------------------------------------
/manifests/ingress/nginx-ingress/tcp-services-configmap.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: tcp-services
5 | namespace: ingress-nginx
6 | labels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/part-of: ingress-nginx
9 | data:
10 | 3306: "mariadb/mydb-mariadb:3306"
11 | 3307: "mariadb/mydb-mariadb-slave:3306"
12 |
13 |
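14 | # each entry maps an exposed TCP port to a backend in the form "<namespace>/<service>:<service-port>"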
--------------------------------------------------------------------------------
/manifests/ingress/nginx-ingress/udp-services-configmap.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: udp-services
5 | namespace: ingress-nginx
6 | labels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/part-of: ingress-nginx
9 | data:
10 | 53: "kube-system/kube-dns:53"
11 |
12 |
--------------------------------------------------------------------------------
/manifests/ingress/test-hello.ing.yaml:
--------------------------------------------------------------------------------
1 | # kubectl run test-hello --image=nginx --expose --port=80
2 | apiVersion: networking.k8s.io/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: test-hello
6 | spec:
7 | rules:
8 | - host: hello.test.com
9 | http:
10 | paths:
11 | - path: /
12 | backend:
13 | serviceName: test-hello
14 | servicePort: 80
15 |
--------------------------------------------------------------------------------
/manifests/ingress/traefik/tls/hello-tls.ing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: hello-tls-ingress
5 | annotations:
6 | kubernetes.io/ingress.class: traefik
7 | spec:
8 | rules:
9 | - host: hello.test.com
10 | http:
11 | paths:
12 | - backend:
13 | serviceName: test-hello
14 | servicePort: 80
15 | tls:
16 | - secretName: traefik-cert
17 |
--------------------------------------------------------------------------------
/manifests/ingress/traefik/tls/k8s-dashboard.ing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: kubernetes-dashboard
5 | namespace: kube-system
6 | annotations:
7 | traefik.ingress.kubernetes.io/redirect-entry-point: https
8 | spec:
9 | rules:
10 | - host: dashboard.test.com
11 | http:
12 | paths:
13 | - path: /
14 | backend:
15 | serviceName: kubernetes-dashboard
16 | servicePort: 443
17 |
18 |
--------------------------------------------------------------------------------
/manifests/ingress/traefik/traefik-ui.ing.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: traefik-web-ui
6 | namespace: kube-system
7 | spec:
8 | rules:
9 | - host: traefik-ui.test.com
10 | http:
11 | paths:
12 | - path: /
13 | backend:
14 | serviceName: traefik-ingress-service
15 | servicePort: 8080
16 |
--------------------------------------------------------------------------------
/manifests/ingress/whoami.ing.yaml:
--------------------------------------------------------------------------------
1 | # kubectl run whoami --image=emilevauge/whoami --port=80 --expose
2 | apiVersion: networking.k8s.io/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: test-whoami
6 | spec:
7 | rules:
8 | - host: who.test.com
9 | http:
10 | paths:
11 | - path: /
12 | backend:
13 | serviceName: whoami
14 | servicePort: 80
15 |
16 |
--------------------------------------------------------------------------------
/manifests/ingress/whoami.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: whoami
5 | labels:
6 | app: whoami
7 | spec:
8 | ports:
9 | - name: web
10 | port: 80
11 | targetPort: 80
12 | selector:
13 | app: whoami
14 | sessionAffinity: None
15 | #type: NodePort
16 |
17 | ---
18 | apiVersion: apps/v1
19 | kind: Deployment
20 | metadata:
21 | name: whoami
22 | spec:
23 | replicas: 2
24 | selector:
25 | matchLabels:
26 | app: whoami
27 | template:
28 | metadata:
29 | labels:
30 | app: whoami
31 | spec:
32 | containers:
33 | - name: whoami
34 | image: emilevauge/whoami
35 | ports:
36 | - containerPort: 80
37 |
--------------------------------------------------------------------------------
/manifests/jenkins/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 |
--------------------------------------------------------------------------------
/manifests/jenkins/Chart.yaml:
--------------------------------------------------------------------------------
1 | name: jenkins
2 | home: https://jenkins.io/
3 | version: 0.16.6
4 | appVersion: 2.121.1
5 | description: Open source continuous integration server. It supports multiple SCM tools
6 | including CVS, Subversion and Git. It can execute Apache Ant and Apache Maven-based
7 | projects as well as arbitrary scripts.
8 | sources:
9 | - https://github.com/jenkinsci/jenkins
10 | - https://github.com/jenkinsci/docker-jnlp-slave
11 | maintainers:
12 | - name: lachie83
13 | email: lachlan.evenson@microsoft.com
14 | - name: viglesiasce
15 | email: viglesias@google.com
16 | - name: lusyoe
17 | email: lusyoe@163.com
18 | icon: https://wiki.jenkins-ci.org/download/attachments/2916393/logo.png
19 |
--------------------------------------------------------------------------------
/manifests/jenkins/OWNERS:
--------------------------------------------------------------------------------
1 | approvers:
2 | - lachie83
3 | - viglesiasce
4 | reviewers:
5 | - lachie83
6 | - viglesiasce
7 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "jenkins.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified app name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name.
13 | */}}
14 | {{- define "jenkins.fullname" -}}
15 | {{- if .Values.fullnameOverride -}}
16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
17 | {{- else -}}
18 | {{- $name := default .Chart.Name .Values.nameOverride -}}
19 | {{- if contains $name .Release.Name -}}
20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
21 | {{- else -}}
22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
23 | {{- end -}}
24 | {{- end -}}
25 | {{- end -}}
26 |
27 | {{- define "jenkins.kubernetes-version" -}}
28 | {{- range .Values.Master.InstallPlugins -}}
29 | {{ if hasPrefix "kubernetes:" . }}
30 | {{- $split := splitList ":" . }}
31 | {{- printf "%s" (index $split 1 ) -}}
32 | {{- end -}}
33 | {{- end -}}
34 | {{- end -}}
35 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/home-pvc.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.Persistence.Enabled (not .Values.Persistence.ExistingClaim) -}}
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 | {{- if .Values.Persistence.Annotations }}
6 | annotations:
7 | {{ toYaml .Values.Persistence.Annotations | indent 4 }}
8 | {{- end }}
9 | name: {{ template "jenkins.fullname" . }}
10 | labels:
11 | app: {{ template "jenkins.fullname" . }}
12 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
13 | release: "{{ .Release.Name }}"
14 | heritage: "{{ .Release.Service }}"
15 | spec:
16 | accessModes:
17 | - {{ .Values.Persistence.AccessMode | quote }}
18 | resources:
19 | requests:
20 | storage: {{ .Values.Persistence.Size | quote }}
21 | {{- if .Values.Persistence.StorageClass }}
22 | {{- if (eq "-" .Values.Persistence.StorageClass) }}
23 | storageClassName: ""
24 | {{- else }}
25 | storageClassName: "{{ .Values.Persistence.StorageClass }}"
26 | {{- end }}
27 | {{- end }}
28 | {{- end }}
29 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/jenkins-agent-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ template "jenkins.fullname" . }}-agent
5 | labels:
6 | app: {{ template "jenkins.fullname" . }}
7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
8 | component: "{{ .Release.Name }}-{{ .Values.Master.Component }}"
9 | {{- if .Values.Master.SlaveListenerServiceAnnotations }}
10 | annotations:
11 | {{ toYaml .Values.Master.SlaveListenerServiceAnnotations | indent 4 }}
12 | {{- end }}
13 | spec:
14 | ports:
15 | - port: {{ .Values.Master.SlaveListenerPort }}
16 | targetPort: {{ .Values.Master.SlaveListenerPort }}
17 | name: slavelistener
18 | selector:
19 | component: "{{ .Release.Name }}-{{ .Values.Master.Component }}"
20 | type: {{ .Values.Master.SlaveListenerServiceType }}
21 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/jenkins-master-ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.Master.HostName }}
2 | apiVersion: {{ .Values.Master.Ingress.ApiVersion }}
3 | kind: Ingress
4 | metadata:
5 | {{- if .Values.Master.Ingress.Annotations }}
6 | annotations:
7 | {{ toYaml .Values.Master.Ingress.Annotations | indent 4 }}
8 | {{- end }}
9 | name: {{ template "jenkins.fullname" . }}
10 | spec:
11 | rules:
12 | - host: {{ .Values.Master.HostName | quote }}
13 | http:
14 | paths:
15 | - backend:
16 | serviceName: {{ template "jenkins.fullname" . }}
17 | servicePort: {{ .Values.Master.ServicePort }}
18 | {{- if .Values.Master.Ingress.TLS }}
19 | tls:
20 | {{ toYaml .Values.Master.Ingress.TLS | indent 4 }}
21 | {{- end -}}
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/jenkins-master-networkpolicy.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.NetworkPolicy.Enabled }}
2 | kind: NetworkPolicy
3 | apiVersion: {{ .Values.NetworkPolicy.ApiVersion }}
4 | metadata:
5 | name: "{{ .Release.Name }}-{{ .Values.Master.Component }}"
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | component: "{{ .Release.Name }}-{{ .Values.Master.Component }}"
10 | ingress:
11 | # Allow web access to the UI
12 | - ports:
13 | - port: {{ .Values.Master.ContainerPort }}
14 | # Allow inbound connections from slave
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | "jenkins/{{ .Release.Name }}-{{ .Values.Agent.Component }}": "true"
19 | ports:
20 | - port: {{ .Values.Master.SlaveListenerPort }}
21 | {{- if .Values.Agent.Enabled }}
22 | ---
23 | kind: NetworkPolicy
24 | apiVersion: {{ .Values.NetworkPolicy.ApiVersion }}
25 | metadata:
26 | name: "{{ .Release.Name }}-{{ .Values.Agent.Component }}"
27 | spec:
28 | podSelector:
29 | matchLabels:
30 | # DefaultDeny
31 | "jenkins/{{ .Release.Name }}-{{ .Values.Agent.Component }}": "true"
32 | {{- end }}
33 | {{- end }}
34 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/jenkins-master-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{template "jenkins.fullname" . }}
5 | labels:
6 | app: {{ template "jenkins.fullname" . }}
7 | heritage: {{.Release.Service | quote }}
8 | release: {{.Release.Name | quote }}
9 | chart: "{{.Chart.Name}}-{{.Chart.Version}}"
10 | component: "{{.Release.Name}}-{{.Values.Master.Component}}"
11 | {{- if .Values.Master.ServiceAnnotations }}
12 | annotations:
13 | {{ toYaml .Values.Master.ServiceAnnotations | indent 4 }}
14 | {{- end }}
15 | spec:
16 | ports:
17 | - port: {{.Values.Master.ServicePort}}
18 | name: http
19 | targetPort: {{.Values.Master.ContainerPort}}
20 | {{if (and (eq .Values.Master.ServiceType "NodePort") (not (empty .Values.Master.NodePort)))}}
21 | nodePort: {{.Values.Master.NodePort}}
22 | {{end}}
23 | selector:
24 | component: "{{.Release.Name}}-{{.Values.Master.Component}}"
25 | type: {{.Values.Master.ServiceType}}
26 | {{if eq .Values.Master.ServiceType "LoadBalancer"}}
27 | loadBalancerSourceRanges: {{.Values.Master.LoadBalancerSourceRanges}}
28 | {{if .Values.Master.LoadBalancerIP}}
29 | loadBalancerIP: {{.Values.Master.LoadBalancerIP}}
30 | {{end}}
31 | {{end}}
32 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/jenkins-test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}"
5 | annotations:
6 | "helm.sh/hook": test-success
7 | spec:
8 | {{- if .Values.Master.NodeSelector }}
9 | nodeSelector:
10 | {{ toYaml .Values.Master.NodeSelector | indent 4 }}
11 | {{- end }}
12 | {{- if .Values.Master.Tolerations }}
13 | tolerations:
14 | {{ toYaml .Values.Master.Tolerations | indent 4 }}
15 | {{- end }}
16 | initContainers:
17 | - name: "test-framework"
18 | image: "dduportal/bats:0.4.0"
19 | command:
20 | - "bash"
21 | - "-c"
22 | - |
23 | set -ex
24 | # copy bats to tools dir
25 | cp -R /usr/local/libexec/ /tools/bats/
26 | volumeMounts:
27 | - mountPath: /tools
28 | name: tools
29 | containers:
30 | - name: {{ .Release.Name }}-ui-test
31 | image: {{ .Values.Master.Image }}:{{ .Values.Master.ImageTag }}
32 | command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
33 | volumeMounts:
34 | - mountPath: /tests
35 | name: tests
36 | readOnly: true
37 | - mountPath: /tools
38 | name: tools
39 | volumes:
40 | - name: tests
41 | configMap:
42 | name: {{ template "jenkins.fullname" . }}-tests
43 | - name: tools
44 | emptyDir: {}
45 | restartPolicy: Never
46 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/jobs.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.Master.Jobs }}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: {{ template "jenkins.fullname" . }}-jobs
6 | data:
7 | {{ .Values.Master.Jobs | indent 2 }}
8 | {{- end -}}
9 |
--------------------------------------------------------------------------------
/manifests/jenkins/templates/rbac.yaml:
--------------------------------------------------------------------------------
1 | {{ if .Values.rbac.install }}
2 | {{- $serviceName := include "jenkins.fullname" . -}}
3 | apiVersion: rbac.authorization.k8s.io/{{ required "A valid .Values.rbac.apiVersion entry required!" .Values.rbac.apiVersion }}
4 | kind: ClusterRoleBinding
5 | metadata:
6 | name: {{ $serviceName }}-role-binding
7 | labels:
8 | app: {{ $serviceName }}
9 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
10 | release: "{{ .Release.Name }}"
11 | heritage: "{{ .Release.Service }}"
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: {{ .Values.rbac.roleRef }}
16 | subjects:
17 | - kind: ServiceAccount
18 | name: {{ $serviceName }}
19 | namespace: {{ .Release.Namespace }}
20 | {{ end }}
--------------------------------------------------------------------------------
/manifests/jenkins/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.Master.UseSecurity }}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ template "jenkins.fullname" . }}
6 | labels:
7 | app: {{ template "jenkins.fullname" . }}
8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
9 | release: "{{ .Release.Name }}"
10 | heritage: "{{ .Release.Service }}"
11 | type: Opaque
12 | data:
13 | {{ if .Values.Master.AdminPassword }}
14 | jenkins-admin-password: {{ .Values.Master.AdminPassword | b64enc | quote }}
15 | {{ else }}
16 | jenkins-admin-password: {{ randAlphaNum 10 | b64enc | quote }}
17 | {{ end }}
18 | jenkins-admin-user: {{ .Values.Master.AdminUser | b64enc | quote }}
19 | {{- end }}
--------------------------------------------------------------------------------
/manifests/jenkins/templates/service-account.yaml:
--------------------------------------------------------------------------------
1 | {{ if .Values.rbac.install }}
2 | {{- $serviceName := include "jenkins.fullname" . -}}
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | name: {{ $serviceName }}
7 | labels:
8 | app: {{ $serviceName }}
9 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
10 | release: "{{ .Release.Name }}"
11 | heritage: "{{ .Release.Service }}"
12 | {{ end }}
--------------------------------------------------------------------------------
/manifests/jenkins/templates/test-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: {{ template "jenkins.fullname" . }}-tests
5 | data:
6 | run.sh: |-
7 | @test "Testing Jenkins UI is accessible" {
8 | curl --retry 48 --retry-delay 10 {{ template "jenkins.fullname" . }}:{{ .Values.Master.ServicePort }}{{ default "" .Values.Master.JenkinsUriPrefix }}/login
9 | }
10 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/.helmignore:
--------------------------------------------------------------------------------
1 | .git
2 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/Chart.yaml:
--------------------------------------------------------------------------------
1 | name: mariadb
2 | version: 5.5.0
3 | appVersion: 10.1.37
4 | description: Fast, reliable, scalable, and easy to use open-source relational database system. MariaDB Server is intended for mission-critical, heavy-load production systems as well as for embedding into mass-deployed software. Highly available MariaDB cluster.
5 | keywords:
6 | - mariadb
7 | - mysql
8 | - database
9 | - sql
10 | - prometheus
11 | home: https://mariadb.org
12 | icon: https://bitnami.com/assets/stacks/mariadb/img/mariadb-stack-220x234.png
13 | sources:
14 | - https://github.com/bitnami/bitnami-docker-mariadb
15 | - https://github.com/prometheus/mysqld_exporter
16 | maintainers:
17 | - name: Bitnami
18 | email: containers@bitnami.com
19 | engine: gotpl
20 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/OWNERS:
--------------------------------------------------------------------------------
1 | approvers:
2 | - prydonius
3 | - tompizmor
4 | - sameersbn
5 | - carrodher
6 | - juan131
7 | reviewers:
8 | - prydonius
9 | - tompizmor
10 | - sameersbn
11 | - carrodher
12 | - juan131
13 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/files/docker-entrypoint-initdb.d/README.md:
--------------------------------------------------------------------------------
1 | You can copy your custom .sh, .sql or .sql.gz files here; they will be executed during the first boot of the image.
2 |
3 | More info in the [bitnami-docker-mariadb](https://github.com/bitnami/bitnami-docker-mariadb#initializing-a-new-instance) repository.
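4 |
5 | For example (a minimal sketch; the file and database names are only illustrative):
6 |
7 | ```bash
8 | # any .sql file placed in this directory runs on the first boot of the image
9 | cat > 10-create-appdb.sql <<'EOF'
10 | CREATE DATABASE IF NOT EXISTS appdb;
11 | EOF
12 | ```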
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/initialization-configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: {{ template "master.fullname" . }}-init-scripts
6 | labels:
7 | app: {{ template "mariadb.name" . }}
8 | chart: {{ template "mariadb.chart" . }}
9 | release: {{ .Release.Name | quote }}
10 | heritage: {{ .Release.Service | quote }}
11 | component: "master"
12 | {{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz") (not .Values.initdbScriptsConfigMap) }}
13 | binaryData:
14 | {{- $root := . }}
15 | {{- range $path, $bytes := .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }}
16 | {{ base $path }}: {{ $root.Files.Get $path | b64enc | quote }}
17 | {{- end }}
18 | {{- end }}
19 | data:
20 | {{- if and (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}") (not .Values.initdbScriptsConfigMap) }}
21 | {{ (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}").AsConfig | indent 2 }}
22 | {{- end }}
23 | {{- with .Values.initdbScripts }}
24 | {{ toYaml . | indent 2 }}
25 | {{- end }}
26 | {{ end }}
27 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/master-configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.master.config }}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: {{ template "master.fullname" . }}
6 | labels:
7 | app: {{ template "mariadb.name" . }}
8 | component: "master"
9 | chart: {{ template "mariadb.chart" . }}
10 | release: {{ .Release.Name | quote }}
11 | heritage: {{ .Release.Service | quote }}
12 | data:
13 | my.cnf: |-
14 | {{ .Values.master.config | indent 4 }}
15 | {{- end -}}
16 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/master-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ template "mariadb.fullname" . }}
5 | labels:
6 | app: "{{ template "mariadb.name" . }}"
7 | component: "master"
8 | chart: {{ template "mariadb.chart" . }}
9 | release: {{ .Release.Name | quote }}
10 | heritage: {{ .Release.Service | quote }}
11 | {{- if .Values.metrics.enabled }}
12 | annotations:
13 | {{ toYaml .Values.metrics.annotations | indent 4 }}
14 | {{- end }}
15 | spec:
16 | type: {{ .Values.service.type }}
17 | {{- if eq .Values.service.type "ClusterIP" }}
18 | {{- if .Values.service.clusterIp }}
19 | clusterIP: {{ .Values.service.clusterIp }}
20 | {{- end }}
21 | {{- end }}
22 | ports:
23 | - name: mysql
24 | port: {{ .Values.service.port }}
25 | targetPort: mysql
26 | {{- if eq .Values.service.type "NodePort" }}
27 | {{- if .Values.service.nodePort }}
28 | {{- if .Values.service.nodePort.master }}
29 | nodePort: {{ .Values.service.nodePort.master }}
30 | {{- end }}
31 | {{- end }}
32 | {{- end }}
33 | {{- if .Values.metrics.enabled }}
34 | - name: metrics
35 | port: 9104
36 | targetPort: metrics
37 | {{- end }}
38 | selector:
39 | app: "{{ template "mariadb.name" . }}"
40 | component: "master"
41 | release: "{{ .Release.Name }}"
42 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/slave-configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.replication.enabled .Values.slave.config }}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: {{ template "slave.fullname" . }}
6 | labels:
7 | app: {{ template "mariadb.name" . }}
8 | component: "slave"
9 | chart: {{ template "mariadb.chart" . }}
10 | release: {{ .Release.Name | quote }}
11 | heritage: {{ .Release.Service | quote }}
12 | data:
13 | my.cnf: |-
14 | {{ .Values.slave.config | indent 4 }}
15 | {{- end }}
16 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/slave-svc.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.replication.enabled }}
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: {{ template "slave.fullname" . }}
6 | labels:
7 | app: "{{ template "mariadb.name" . }}"
8 | chart: {{ template "mariadb.chart" . }}
9 | component: "slave"
10 | release: {{ .Release.Name | quote }}
11 | heritage: {{ .Release.Service | quote }}
12 | {{- if .Values.metrics.enabled }}
13 | annotations:
14 | {{ toYaml .Values.metrics.annotations | indent 4 }}
15 | {{- end }}
16 | spec:
17 | type: {{ .Values.service.type }}
18 | {{- if eq .Values.service.type "ClusterIP" }}
19 | {{- if .Values.service.clusterIp }}
20 | clusterIP: {{ .Values.service.clusterIp }}
21 | {{- end }}
22 | {{- end }}
23 | ports:
24 | - name: mysql
25 | port: {{ .Values.service.port }}
26 | targetPort: mysql
27 | {{- if (eq .Values.service.type "NodePort") }}
28 | {{- if .Values.service.nodePort }}
29 | {{- if .Values.service.nodePort.slave }}
30 | nodePort: {{ .Values.service.nodePort.slave }}
31 | {{- end }}
32 | {{- end }}
33 | {{- end }}
34 | {{- if .Values.metrics.enabled }}
35 | - name: metrics
36 | port: 9104
37 | targetPort: metrics
38 | {{- end }}
39 | selector:
40 | app: "{{ template "mariadb.name" . }}"
41 | component: "slave"
42 | release: "{{ .Release.Name }}"
43 | {{- end }}
44 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/test-runner.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ template "mariadb.fullname" . }}-test-{{ randAlphaNum 5 | lower }}"
5 | annotations:
6 | "helm.sh/hook": test-success
7 | spec:
8 | initContainers:
9 | - name: "test-framework"
10 | image: "dduportal/bats:0.4.0"
11 | command:
12 | - "bash"
13 | - "-c"
14 | - |
15 | set -ex
16 | # copy bats to tools dir
17 | cp -R /usr/local/libexec/ /tools/bats/
18 | volumeMounts:
19 | - mountPath: /tools
20 | name: tools
21 | containers:
22 | - name: mariadb-test
23 | image: {{ template "mariadb.image" . }}
24 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
25 | command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
26 | env:
27 | - name: MARIADB_ROOT_PASSWORD
28 | valueFrom:
29 | secretKeyRef:
30 | {{- if .Values.existingSecret }}
31 | name: {{ .Values.existingSecret }}
32 | {{- else }}
33 | name: {{ template "mariadb.fullname" . }}
34 | {{- end }}
35 | key: mariadb-root-password
36 | volumeMounts:
37 | - mountPath: /tests
38 | name: tests
39 | readOnly: true
40 | - mountPath: /tools
41 | name: tools
42 | volumes:
43 | - name: tests
44 | configMap:
45 | name: {{ template "mariadb.fullname" . }}-tests
46 | - name: tools
47 | emptyDir: {}
48 | restartPolicy: Never
49 |
--------------------------------------------------------------------------------
/manifests/mariadb-cluster/mariadb/templates/tests.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: {{ template "mariadb.fullname" . }}-tests
5 | data:
6 | run.sh: |-
7 | @test "Testing MariaDB is accessible" {
8 | mysql -h {{ template "mariadb.fullname" . }} -uroot -p$MARIADB_ROOT_PASSWORD -e 'show databases;'
9 | }
10 |
--------------------------------------------------------------------------------
/manifests/mysql-cluster/mysql-configmap.yaml:
--------------------------------------------------------------------------------
1 | # https://kubernetes.io/docs/tasks/run-application/run-replicated-stateful-application/
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: mysql
6 | labels:
7 | app: mysql
8 | app.kubernetes.io/name: mysql
9 | data:
10 | primary.cnf: |
11 | # Apply this config only on the primary.
12 | [mysqld]
13 | log-bin
14 | replica.cnf: |
15 | # Apply this config only on replicas.
16 | [mysqld]
17 | super-read-only
18 |
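19 | # note: in the referenced tutorial, the StatefulSet's init container copies primary.cnf into the pod
20 | # with ordinal 0 and replica.cnf into all other pods, which is why both keys live in one ConfigMap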
--------------------------------------------------------------------------------
/manifests/mysql-cluster/mysql-services.yaml:
--------------------------------------------------------------------------------
1 | # https://kubernetes.io/docs/tasks/run-application/run-replicated-stateful-application/
2 | # Headless service for stable DNS entries of StatefulSet members.
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 | name: mysql
7 | labels:
8 | app: mysql
9 | app.kubernetes.io/name: mysql
10 | spec:
11 | ports:
12 | - name: mysql
13 | port: 3306
14 | clusterIP: None
15 | selector:
16 | app: mysql
17 | ---
18 | # Client service for connecting to any MySQL instance for reads.
19 | # For writes, you must instead connect to the primary: mysql-0.mysql.
20 | apiVersion: v1
21 | kind: Service
22 | metadata:
23 | name: mysql-read
24 | labels:
25 | app: mysql
26 | app.kubernetes.io/name: mysql
27 | readonly: "true"
28 | spec:
29 | ports:
30 | - name: mysql
31 | port: 3306
32 | selector:
33 | app: mysql
34 |
--------------------------------------------------------------------------------
/manifests/mysql-cluster/mysql-test-client.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: mycli
6 | name: mysql-test-client
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: mycli
12 | template:
13 | metadata:
14 | labels:
15 | app: mycli
16 | spec:
17 | containers:
18 | - name: mycli
19 | image: mysql:5.7
20 | command:
21 | - tail
22 | - "-f"
23 | - "/dev/null"
24 | env:
25 | - name: TZ
26 | value: "Asia/Shanghai"
27 | - name: LANG
28 | value: "C.UTF-8"
29 |
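30 | # usage sketch (assumption; relies on the mysql-read service defined above and an empty root password):
31 | #   kubectl exec -it deploy/mysql-test-client -- mysql -h mysql-read -e 'SELECT @@hostname'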
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: 5.0.6
3 | description: Highly available Kubernetes implementation of Redis
4 | engine: gotpl
5 | home: http://redis.io/
6 | icon: https://upload.wikimedia.org/wikipedia/en/thumb/6/6b/Redis_Logo.svg/1200px-Redis_Logo.svg.png
7 | keywords:
8 | - redis
9 | - keyvalue
10 | - database
11 | maintainers:
12 | - email: salimsalaues@gmail.com
13 | name: ssalaues
14 | - email: aaron.layfield@gmail.com
15 | name: dandydeveloper
16 | name: redis-ha
17 | sources:
18 | - https://redis.io/download
19 | - https://github.com/scality/Zenko/tree/development/1.0/kubernetes/zenko/charts/redis-ha
20 | - https://github.com/oliver006/redis_exporter
21 | version: 4.4.4
22 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/OWNERS:
--------------------------------------------------------------------------------
1 | approvers:
2 | - ssalaues
3 | - dandydeveloper
4 | reviewers:
5 | - ssalaues
6 | - dandydeveloper
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/ci/haproxy-enabled-values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Enable HAProxy to manage Load Balancing
3 | haproxy:
4 | enabled: true
5 | annotations:
6 | any.domain/key: "value"
7 | serviceAccount:
8 | create: true
9 | metrics:
10 | enabled: true
11 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | Redis can be accessed via port {{ .Values.redis.port }} and Sentinel can be accessed via port {{ .Values.sentinel.port }} on the following DNS name from within your cluster:
2 | {{ template "redis-ha.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
3 |
4 | To connect to your Redis server:
5 |
6 | {{- if .Values.auth }}
7 | 1. To retrieve the redis password:
8 | echo $(kubectl get secret {{ template "redis-ha.fullname" . }} -o "jsonpath={.data['auth']}" | base64 --decode)
9 |
10 | 2. Connect to the Redis master pod that you can use as a client. By default the {{ template "redis-ha.fullname" . }}-server-0 pod is configured as the master:
11 |
12 | kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }}
13 |
14 | 3. Connect using the Redis CLI (inside container):
15 |
16 | redis-cli -a
17 | {{- else }}
18 | 1. Run a Redis pod that you can use as a client:
19 |
20 | kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }}
21 |
22 | 2. Connect using the Redis CLI:
23 |
24 | redis-cli -h {{ template "redis-ha.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
25 | {{- end }}
26 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-auth-secret.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.auth (not .Values.existingSecret) -}}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ template "redis-ha.fullname" . }}
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | {{ include "labels.standard" . | indent 4 }}
9 | type: Opaque
10 | data:
11 | {{ .Values.authKey }}: {{ .Values.redisPassword | b64enc | quote }}
12 | {{- end -}}
13 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-announce-service.yaml:
--------------------------------------------------------------------------------
1 | {{- $fullName := include "redis-ha.fullname" . }}
2 | {{- $namespace := .Release.Namespace -}}
3 | {{- $replicas := int (toString .Values.replicas) }}
4 | {{- $root := . }}
5 | {{- range $i := until $replicas }}
6 | ---
7 | apiVersion: v1
8 | kind: Service
9 | metadata:
10 | name: {{ $fullName }}-announce-{{ $i }}
11 | namespace: {{ $namespace }}
12 | labels:
13 | {{ include "labels.standard" $root | indent 4 }}
14 | annotations:
15 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
16 | {{- if $root.Values.serviceAnnotations }}
17 | {{ toYaml $root.Values.serviceAnnotations | indent 4 }}
18 | {{- end }}
19 | spec:
20 | publishNotReadyAddresses: true
21 | type: ClusterIP
22 | ports:
23 | - name: server
24 | port: {{ $root.Values.redis.port }}
25 | protocol: TCP
26 | targetPort: redis
27 | - name: sentinel
28 | port: {{ $root.Values.sentinel.port }}
29 | protocol: TCP
30 | targetPort: sentinel
31 | {{- if $root.Values.exporter.enabled }}
32 | - name: exporter
33 | port: {{ $root.Values.exporter.port }}
34 | protocol: TCP
35 | targetPort: exporter-port
36 | {{- end }}
37 | selector:
38 | release: {{ $root.Release.Name }}
39 | app: {{ include "redis-ha.name" $root }}
40 | "statefulset.kubernetes.io/pod-name": {{ $fullName }}-server-{{ $i }}
41 | {{- end }}
42 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: {{ template "redis-ha.fullname" . }}-configmap
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | heritage: {{ .Release.Service }}
8 | release: {{ .Release.Name }}
9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
10 | app: {{ template "redis-ha.fullname" . }}
11 | data:
12 | redis.conf: |
13 | {{- include "config-redis.conf" . }}
14 |
15 | sentinel.conf: |
16 | {{- include "config-sentinel.conf" . }}
17 |
18 | init.sh: |
19 | {{- include "config-init.sh" . }}
20 | {{ if .Values.haproxy.enabled }}
21 | haproxy.cfg: |-
22 | {{- include "config-haproxy.cfg" . }}
23 | {{- end }}
24 | haproxy_init.sh: |
25 | {{- include "config-haproxy_init.sh" . }}
26 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-exporter-script-configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.exporter.script }}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: {{ template "redis-ha.fullname" . }}-exporter-script-configmap
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | {{ include "labels.standard" . | indent 4 }}
9 | data:
10 | script: {{ toYaml .Values.exporter.script | indent 2 }}
11 | {{- end }}
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-pdb.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.podDisruptionBudget -}}
2 | apiVersion: policy/v1beta1
3 | kind: PodDisruptionBudget
4 | metadata:
5 | name: {{ template "redis-ha.fullname" . }}-pdb
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | {{ include "labels.standard" . | indent 4 }}
9 | spec:
10 | selector:
11 | matchLabels:
12 | release: {{ .Release.Name }}
13 | app: {{ template "redis-ha.name" . }}
14 | {{ toYaml .Values.podDisruptionBudget | indent 2 }}
15 | {{- end -}}
16 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-role.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.serviceAccount.create .Values.rbac.create }}
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: {{ template "redis-ha.fullname" . }}
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | heritage: {{ .Release.Service }}
9 | release: {{ .Release.Name }}
10 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
11 | app: {{ template "redis-ha.fullname" . }}
12 | rules:
13 | - apiGroups:
14 | - ""
15 | resources:
16 | - endpoints
17 | verbs:
18 | - get
19 | {{- end }}
20 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-rolebinding.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.serviceAccount.create .Values.rbac.create }}
2 | kind: RoleBinding
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | metadata:
5 | name: {{ template "redis-ha.fullname" . }}
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | heritage: {{ .Release.Service }}
9 | release: {{ .Release.Name }}
10 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
11 | app: {{ template "redis-ha.fullname" . }}
12 | subjects:
13 | - kind: ServiceAccount
14 | name: {{ template "redis-ha.serviceAccountName" . }}
15 | roleRef:
16 | apiGroup: rbac.authorization.k8s.io
17 | kind: Role
18 | name: {{ template "redis-ha.fullname" . }}
19 | {{- end }}
20 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ template "redis-ha.fullname" . }}
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{ include "labels.standard" . | indent 4 }}
8 | {{- if and ( .Values.exporter.enabled ) ( .Values.exporter.serviceMonitor.enabled ) }}
9 | servicemonitor: enabled
10 | {{- end }}
11 | annotations:
12 | {{- if .Values.serviceAnnotations }}
13 | {{ toYaml .Values.serviceAnnotations | indent 4 }}
14 | {{- end }}
15 | spec:
16 | type: ClusterIP
17 | clusterIP: None
18 | ports:
19 | - name: server
20 | port: {{ .Values.redis.port }}
21 | protocol: TCP
22 | targetPort: redis
23 | - name: sentinel
24 | port: {{ .Values.sentinel.port }}
25 | protocol: TCP
26 | targetPort: sentinel
27 | {{- if .Values.exporter.enabled }}
28 | - name: exporter-port
29 | port: {{ .Values.exporter.port }}
30 | protocol: TCP
31 | targetPort: exporter-port
32 | {{- end }}
33 | selector:
34 | release: {{ .Release.Name }}
35 | app: {{ template "redis-ha.name" . }}
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create }}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ template "redis-ha.serviceAccountName" . }}
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | heritage: {{ .Release.Service }}
9 | release: {{ .Release.Name }}
10 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
11 | app: {{ template "redis-ha.fullname" . }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-ha-servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | {{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.exporter.serviceMonitor.enabled ) ( .Values.exporter.enabled ) }}
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | {{- if .Values.exporter.serviceMonitor.labels }}
6 | labels:
7 | {{ toYaml .Values.exporter.serviceMonitor.labels | indent 4}}
8 | {{- end }}
9 | name: {{ template "redis-ha.fullname" . }}
10 | namespace: {{ .Release.Namespace }}
11 | {{- if .Values.exporter.serviceMonitor.namespace }}
12 | namespace: {{ .Values.exporter.serviceMonitor.namespace }}
13 | {{- end }}
14 | spec:
15 | endpoints:
16 | - targetPort: {{ .Values.exporter.port }}
17 | {{- if .Values.exporter.serviceMonitor.interval }}
18 | interval: {{ .Values.exporter.serviceMonitor.interval }}
19 | {{- end }}
20 | {{- if .Values.exporter.serviceMonitor.telemetryPath }}
21 | path: {{ .Values.exporter.serviceMonitor.telemetryPath }}
22 | {{- end }}
23 | {{- if .Values.exporter.serviceMonitor.timeout }}
24 | scrapeTimeout: {{ .Values.exporter.serviceMonitor.timeout }}
25 | {{- end }}
26 | jobLabel: {{ template "redis-ha.fullname" . }}
27 | namespaceSelector:
28 | matchNames:
29 | - {{ .Release.Namespace }}
30 | selector:
31 | matchLabels:
32 | app: {{ template "redis-ha.name" . }}
33 | release: {{ .Release.Name }}
34 | servicemonitor: enabled
35 | {{- end }}
36 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-haproxy-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.haproxy.serviceAccount.create .Values.haproxy.enabled }}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ template "redis-ha.serviceAccountName" . }}-haproxy
6 | namespace: {{ .Release.Namespace }}
7 | labels:
8 | heritage: {{ .Release.Service }}
9 | release: {{ .Release.Name }}
10 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
11 | app: {{ template "redis-ha.fullname" . }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/redis-haproxy-servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | {{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.haproxy.metrics.serviceMonitor.enabled ) ( .Values.haproxy.metrics.enabled ) }}
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | {{- with .Values.haproxy.metrics.serviceMonitor.labels }}
6 | labels: {{ toYaml . | nindent 4}}
7 | {{- end }}
8 | name: {{ template "redis-ha.fullname" . }}-haproxy
9 | namespace: {{ .Release.Namespace }}
10 | {{- if .Values.haproxy.metrics.serviceMonitor.namespace }}
11 | namespace: {{ .Values.haproxy.metrics.serviceMonitor.namespace }}
12 | {{- end }}
13 | spec:
14 | endpoints:
15 | - targetPort: {{ .Values.haproxy.metrics.port }}
16 | {{- if .Values.haproxy.metrics.serviceMonitor.interval }}
17 | interval: {{ .Values.haproxy.metrics.serviceMonitor.interval }}
18 | {{- end }}
19 | {{- if .Values.haproxy.metrics.serviceMonitor.telemetryPath }}
20 | path: {{ .Values.haproxy.metrics.serviceMonitor.telemetryPath }}
21 | {{- end }}
22 | {{- if .Values.haproxy.metrics.serviceMonitor.timeout }}
23 | scrapeTimeout: {{ .Values.haproxy.metrics.serviceMonitor.timeout }}
24 | {{- end }}
25 | jobLabel: {{ template "redis-ha.fullname" . }}-haproxy
26 | namespaceSelector:
27 | matchNames:
28 | - {{ .Release.Namespace }}
29 | selector:
30 | matchLabels:
31 | app: {{ template "redis-ha.name" . }}
32 | release: {{ .Release.Name }}
33 | component: {{ template "redis-ha.fullname" . }}-haproxy
34 | {{- end }}
35 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/tests/test-redis-ha-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: {{ template "redis-ha.fullname" . }}-configmap-test
5 | labels:
6 | {{ include "labels.standard" . | indent 4 }}
7 | annotations:
8 | "helm.sh/hook": test-success
9 | spec:
10 | containers:
11 | - name: check-init
12 | image: koalaman/shellcheck:v0.5.0
13 | args:
14 | - --shell=sh
15 | - /readonly-config/init.sh
16 | volumeMounts:
17 | - name: config
18 | mountPath: /readonly-config
19 | readOnly: true
20 | {{- if .Values.imagePullSecrets }}
21 | imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 4 }}
22 | {{- end }}
23 | restartPolicy: Never
24 | volumes:
25 | - name: config
26 | configMap:
27 | name: {{ template "redis-ha.fullname" . }}-configmap
28 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/redis-ha/templates/tests/test-redis-ha-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: {{ template "redis-ha.fullname" . }}-service-test
5 | labels:
6 | {{ include "labels.standard" . | indent 4 }}
7 | annotations:
8 | "helm.sh/hook": test-success
9 | spec:
10 | containers:
11 | - name: "{{ .Release.Name }}-service-test"
12 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
13 | command:
14 | - sh
15 | - -c
16 | - redis-cli -h {{ template "redis-ha.fullname" . }} -p {{ .Values.redis.port }} info server
17 | {{- if .Values.imagePullSecrets }}
18 | imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 4 }}
19 | {{- end }}
20 | restartPolicy: Never
21 |
--------------------------------------------------------------------------------
/manifests/redis-cluster/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -x
3 |
4 | ROOT=$(cd `dirname $0`; pwd)
5 | cd $ROOT
6 |
7 | helm install redis \
8 | --create-namespace \
9 | --namespace dependency \
10 | -f ./values.yaml \
11 | ./redis-ha
12 |
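13 | # A quick way to verify the release afterwards (release and namespace names match
14 | # the 'helm install' above; the 'app=redis-ha' label assumes no nameOverride is set):
15 | #   helm status redis -n dependency
16 | #   kubectl get pods -n dependency -l app=redis-ha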
--------------------------------------------------------------------------------
/manifests/redis-cluster/values.yaml:
--------------------------------------------------------------------------------
1 | image:
2 | repository: redis
3 | tag: 5.0.6-alpine
4 |
5 | replicas: 2
6 |
7 | ## Redis specific configuration options
8 | redis:
9 | port: 6379
10 |   masterGroupName: "mymaster" # must match ^[\\w-\\.]+$ and can be templated
11 | config:
12 | ## For all available options see http://download.redis.io/redis-stable/redis.conf
13 | min-replicas-to-write: 1
14 | min-replicas-max-lag: 5 # Value in seconds
15 | maxmemory: "4g" # Max memory to use for each redis instance. Default is unlimited.
16 | maxmemory-policy: "allkeys-lru" # Max memory policy to use for each redis instance. Default is volatile-lru.
17 | repl-diskless-sync: "yes"
18 | rdbcompression: "yes"
19 | rdbchecksum: "yes"
20 |
21 | resources:
22 | requests:
23 | memory: 200Mi
24 | cpu: 100m
25 | limits:
26 | memory: 4000Mi
27 |
28 | ## Sentinel specific configuration options
29 | sentinel:
30 | port: 26379
31 | quorum: 1
32 |
33 | resources:
34 | requests:
35 | memory: 200Mi
36 | cpu: 100m
37 | limits:
38 | memory: 200Mi
39 |
40 | hardAntiAffinity: true
41 |
42 | ## Configures redis with AUTH (requirepass & masterauth conf params)
43 | auth: false
44 |
45 | persistentVolume:
46 | enabled: false
47 |
48 | hostPath:
49 | path: "/data/mcs-redis/{{ .Release.Name }}"
50 |
--------------------------------------------------------------------------------
/manifests/storage/local-storage/example-sts.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: local-test
5 | spec:
6 | serviceName: ""
7 | replicas: 2
8 | selector:
9 | matchLabels:
10 | app: local-test
11 | template:
12 | metadata:
13 | labels:
14 | app: local-test
15 | spec:
16 | containers:
17 | - name: test-container
18 | image: busybox
19 | command:
20 | - "/bin/sh"
21 | args:
22 | - "-c"
23 | - "sleep 100000"
24 | volumeMounts:
25 | - name: local-vol
26 | mountPath: /usr/test-pod
27 | volumeClaimTemplates:
28 | - metadata:
29 | name: local-vol
30 | spec:
31 | accessModes: [ "ReadWriteOnce" ]
32 | storageClassName: "local-storage"
33 | resources:
34 | requests:
35 | storage: 5Gi
36 |
--------------------------------------------------------------------------------
/manifests/storage/local-storage/local-pv1.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: local-pv1
5 | spec:
6 | capacity:
7 | storage: 5Gi
8 | volumeMode: Filesystem
9 | accessModes:
10 | - ReadWriteOnce
11 | persistentVolumeReclaimPolicy: Delete
12 | storageClassName: local-storage
13 | local:
14 | path: /mnt/disks/vol1
15 | nodeAffinity:
16 | required:
17 | nodeSelectorTerms:
18 | - matchExpressions:
19 | - key: kubernetes.io/hostname
20 | operator: In
21 | values:
22 | - 192.168.1.2
23 | - 192.168.1.3
24 |
--------------------------------------------------------------------------------
/manifests/storage/local-storage/local-pv2.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: local-pv2
5 | spec:
6 | capacity:
7 | storage: 5Gi
8 | volumeMode: Filesystem
9 | accessModes:
10 | - ReadWriteOnce
11 | persistentVolumeReclaimPolicy: Delete
12 | storageClassName: local-storage
13 | local:
14 | path: /mnt/disks/vol2
15 | nodeAffinity:
16 | required:
17 | nodeSelectorTerms:
18 | - matchExpressions:
19 | - key: kubernetes.io/hostname
20 | operator: In
21 | values:
22 | - 192.168.1.4
23 |
--------------------------------------------------------------------------------
/manifests/storage/local-storage/local-storage-class.yml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: local-storage
5 | provisioner: kubernetes.io/no-provisioner
6 | volumeBindingMode: WaitForFirstConsumer
7 |
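8 | # With 'WaitForFirstConsumer', PVs of this class are only bound once a consuming pod
9 | # is scheduled. The local PVs above also require the '/mnt/disks/volN' paths to exist
10 | # on the listed nodes beforehand (e.g. 'mkdir -p /mnt/disks/vol1'). A quick check:
11 | #   kubectl get sc,pv,pvc -o wide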
--------------------------------------------------------------------------------
/manifests/storage/test.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: test-claim
5 | spec:
6 | storageClassName: nfs-dynamic-class
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 1Mi
12 |
13 | ---
14 | kind: Pod
15 | apiVersion: v1
16 | metadata:
17 | name: test
18 | spec:
19 | containers:
20 | - name: test
21 | image: busybox:1.28.4
22 | imagePullPolicy: IfNotPresent
23 | command:
24 | - "/bin/sh"
25 | args:
26 | - "-c"
27 | - "echo 'hello k8s' > /mnt/SUCCESS && sleep 36000 || exit 1"
28 | volumeMounts:
29 | - name: nfs-pvc
30 | mountPath: "/mnt"
31 | restartPolicy: "Never"
32 | volumes:
33 | - name: nfs-pvc
34 | persistentVolumeClaim:
35 | claimName: test-claim
36 |
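37 | # Note: this test assumes an NFS dynamic provisioner has already registered the
38 | # 'nfs-dynamic-class' StorageClass. It can be checked before/after applying with:
39 | #   kubectl get storageclass
40 | #   kubectl get pvc test-claim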
--------------------------------------------------------------------------------
/pics/alipay.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/pics/alipay.gif
--------------------------------------------------------------------------------
/pics/ha-1x.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/pics/ha-1x.gif
--------------------------------------------------------------------------------
/pics/wxpay.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/pics/wxpay.gif
--------------------------------------------------------------------------------
/playbooks/01.prepare.yml:
--------------------------------------------------------------------------------
1 | # [optional] to synchronize system time of nodes with 'chrony'
2 | - hosts:
3 | - kube_master
4 | - kube_node
5 | - etcd
6 | - ex_lb
7 | - chrony
8 | roles:
9 | - { role: os-harden, when: "OS_HARDEN|bool" }
10 | - { role: chrony, when: "groups['chrony']|length > 0" }
11 |
12 | # to create CA, kubeconfig, kube-proxy.kubeconfig etc.
13 | - hosts: localhost
14 | roles:
15 | - deploy
16 |
17 | # prepare tasks for all nodes
18 | - hosts:
19 | - kube_master
20 | - kube_node
21 | - etcd
22 | roles:
23 | - prepare
24 |
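25 | # Note: these numbered playbooks are normally driven through 'ezctl', e.g.
26 | # (assuming a cluster named 'k8s-01'): 'ezctl setup k8s-01 01' for this step,
27 | # or 'ezctl setup k8s-01 all' for a full installation.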
--------------------------------------------------------------------------------
/playbooks/02.etcd.yml:
--------------------------------------------------------------------------------
1 | # to install etcd cluster
2 | - hosts: etcd
3 | roles:
4 | - etcd
5 |
--------------------------------------------------------------------------------
/playbooks/03.runtime.yml:
--------------------------------------------------------------------------------
1 | # to install a container runtime
2 | - hosts:
3 | - kube_master
4 | - kube_node
5 | roles:
6 | - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
7 | - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
8 |
--------------------------------------------------------------------------------
/playbooks/04.kube-master.yml:
--------------------------------------------------------------------------------
1 | # to set up 'kube_master' nodes
2 | - hosts: kube_master
3 | roles:
4 | - kube-lb
5 | - kube-master
6 | - kube-node
7 |
--------------------------------------------------------------------------------
/playbooks/05.kube-node.yml:
--------------------------------------------------------------------------------
1 | # to set up 'kube_node' nodes
2 | - hosts: kube_node
3 | roles:
4 | - { role: kube-lb, when: "inventory_hostname not in groups['kube_master']" }
5 | - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" }
6 |
--------------------------------------------------------------------------------
/playbooks/06.network.yml:
--------------------------------------------------------------------------------
1 | # to install the network plugin; only one can be chosen
2 | - hosts:
3 | - kube_master
4 | - kube_node
5 | roles:
6 | - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
7 | - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
8 | - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
9 | - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
10 | - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
11 |
--------------------------------------------------------------------------------
/playbooks/07.cluster-addon.yml:
--------------------------------------------------------------------------------
1 | # to install cluster addons
2 | - hosts: localhost
3 | roles:
4 | - cluster-addon
5 |
--------------------------------------------------------------------------------
/playbooks/10.ex-lb.yml:
--------------------------------------------------------------------------------
1 | - hosts: ex_lb
2 | roles:
3 | - ex-lb
4 |
--------------------------------------------------------------------------------
/playbooks/11.harbor.yml:
--------------------------------------------------------------------------------
1 | # [optional] to set up a HARBOR registry and integrate it with the k8s cluster
2 | # read the guide: 'guide/harbor.md'
3 |
4 | ### --- install harbor ---
5 | - hosts: harbor
6 | roles:
7 | - { role: os-harden, when: "NEW_INSTALL|bool and OS_HARDEN|bool" }
8 | - { role: chrony, when: "NEW_INSTALL|bool and groups['chrony']|length > 0" }
9 | - { role: prepare, when: "NEW_INSTALL|bool" }
10 | - { role: docker, when: "NEW_INSTALL|bool" }
11 | - { role: harbor, when: "NEW_INSTALL|bool" }
12 |
13 | ### --- config k8s nodes to work with harbor ---
14 | - hosts:
15 | - kube_master
16 | - kube_node
17 | tasks:
18 | # [optional] if you have a DNS server, add an 'A record' instead
19 | - name: Adding an '/etc/hosts' entry for the HARBOR DOMAIN
20 | lineinfile:
21 | dest: /etc/hosts
22 | state: present
23 | regexp: '{{ HARBOR_DOMAIN }}'
24 | line: "{{ groups['harbor'][0] }} {{ HARBOR_DOMAIN }}"
25 | when: "hostvars[groups.harbor[0]]['HARBOR_DOMAIN'] != ''"
26 |
--------------------------------------------------------------------------------
/playbooks/22.addnode.yml:
--------------------------------------------------------------------------------
1 | # Note: this playbook cannot run independently
2 |
3 | - hosts: "{{ NODE_TO_ADD }}"
4 | roles:
5 | - { role: os-harden, when: "OS_HARDEN|bool" }
6 | - { role: chrony, when: "groups['chrony']|length > 0" }
7 | - prepare
8 | - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
9 | - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
10 | - kube-lb
11 | - kube-node
12 | - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
13 | - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
14 | - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
15 | - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
16 |
--------------------------------------------------------------------------------
/playbooks/23.addmaster.yml:
--------------------------------------------------------------------------------
1 | # Note: this playbook cannot run independently
2 |
3 | - hosts: "{{ NODE_TO_ADD }}"
4 | roles:
5 | - { role: os-harden, when: "OS_HARDEN|bool" }
6 | - { role: chrony, when: "groups['chrony']|length > 0" }
7 | - prepare
8 | - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
9 | - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
10 | - kube-lb
11 | - kube-master
12 | - kube-node
13 | - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
14 | - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
15 | - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
16 | - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
17 | - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
18 |
--------------------------------------------------------------------------------
/playbooks/91.start.yml:
--------------------------------------------------------------------------------
1 | - hosts: etcd
2 | tasks:
3 | - name: starting etcd cluster
4 | service: name=etcd state=started enabled=yes
5 |
6 | - hosts:
7 | - kube_master
8 | - kube_node
9 | tasks:
10 | - name: starting kube-lb
11 | service: name=kube-lb state=started enabled=yes
12 |
13 | - hosts: kube_master
14 | tasks:
15 | - name: starting kube_master services
16 | service: name={{ item }} state=started enabled=yes
17 | with_items:
18 | - kube-apiserver
19 | - kube-controller-manager
20 | - kube-scheduler
21 |
22 | - hosts:
23 | - kube_master
24 | - kube_node
25 | tasks:
26 | - name: starting docker
27 | service: name=docker state=started enabled=yes
28 | when: "CONTAINER_RUNTIME == 'docker'"
29 |
30 | - name: starting containerd
31 | service: name=containerd state=started enabled=yes
32 | when: "CONTAINER_RUNTIME == 'containerd'"
33 |
34 | - name: starting kube_node services
35 | service: name={{ item }} state=started enabled=yes
36 | with_items:
37 | - kubelet
38 | - kube-proxy
39 |
40 | - hosts: ex_lb
41 | tasks:
42 | - name: starting external loadbalance
43 | service: name={{ item }} state=started enabled=yes
44 | with_items:
45 | - l4lb
46 | - keepalived
47 |
--------------------------------------------------------------------------------
/playbooks/92.stop.yml:
--------------------------------------------------------------------------------
1 | - hosts: kube_master
2 | tasks:
3 | - name: stopping kube_master services
4 | service: name={{ item }} state=stopped enabled=no
5 | with_items:
6 | - kube-apiserver
7 | - kube-controller-manager
8 | - kube-scheduler
9 |
10 | - hosts: etcd
11 | tasks:
12 | - name: stopping etcd cluster
13 | service: name=etcd state=stopped enabled=no
14 |
15 | - hosts: ex_lb
16 | tasks:
17 | - name: stopping external loadbalance
18 | service: name={{ item }} state=stopped enabled=no
19 | with_items:
20 | - l4lb
21 | - keepalived
22 |
23 | - hosts:
24 | - kube_master
25 | - kube_node
26 | tasks:
27 | - name: stopping kube_node services
28 | service: name={{ item }} state=stopped enabled=no
29 | with_items:
30 | - kube-lb
31 | - kubelet
32 | - kube-proxy
33 |
34 | - name: stopping docker
35 | service: name=docker state=stopped enabled=no
36 | when: "CONTAINER_RUNTIME == 'docker'"
37 |
38 | - name: stopping containerd
39 | service: name=containerd state=stopped enabled=no
40 | when: "CONTAINER_RUNTIME == 'containerd'"
41 |
--------------------------------------------------------------------------------
/playbooks/93.upgrade.yml:
--------------------------------------------------------------------------------
1 | # WARNING: Upgrading the k8s cluster can be risky. Make sure you know what you are doing.
2 | # Read the guide: 'op/upgrade.md' .
3 | # Usage: ezctl upgrade
4 |
5 | # check k8s version
6 | - hosts: kube_master
7 | tasks:
8 | - name: get running k8s version
9 | shell: "{{ bin_dir }}/kube-apiserver --version"
10 | register: RUNNING_VER
11 | run_once: true
12 |
13 | - name: print running version
14 | debug: var="RUNNING_VER.stdout"
15 | run_once: true
16 |
17 | - name: get update version
18 | shell: "{{ base_dir }}/bin/kube-apiserver --version"
19 | register: UPDATE_VER
20 | run_once: true
21 | connection: local
22 |
23 | - name: print update version
24 | debug: var="UPDATE_VER.stdout"
25 | run_once: true
26 |
27 | - name: check version
28 | fail: msg="running version is the same as the update version, UPDATE ABORT."
29 | when: "RUNNING_VER.stdout == UPDATE_VER.stdout"
30 |
31 | # update masters
32 | - hosts:
33 | - kube_master
34 | roles:
35 | - kube-master
36 | - kube-node
37 |
38 | # update nodes
39 | - hosts:
40 | - kube_node
41 | roles:
42 | - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" }
43 |
--------------------------------------------------------------------------------
/playbooks/95.restore.yml:
--------------------------------------------------------------------------------
1 | # cluster-restore playbook
2 | # read the guide: 'op/cluster_restore.md'
3 | # https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#restoring-an-etcd-cluster
4 |
5 | - hosts: kube_master
6 | tasks:
7 | - name: stopping kube_master services
8 | service: name={{ item }} state=stopped
9 | with_items:
10 | - kube-apiserver
11 | - kube-controller-manager
12 | - kube-scheduler
13 |
14 | - hosts:
15 | - kube_master
16 | - kube_node
17 | tasks:
18 | - name: stopping kube_node services
19 | service: name={{ item }} state=stopped
20 | with_items:
21 | - kubelet
22 | - kube-proxy
23 |
24 | - hosts: etcd
25 | roles:
26 | - cluster-restore
27 |
28 | - hosts: kube_master
29 | tasks:
30 | - name: starting kube_master services
31 | service: name={{ item }} state=started enabled=yes
32 | with_items:
33 | - kube-apiserver
34 | - kube-controller-manager
35 | - kube-scheduler
36 |
37 | - hosts:
38 | - kube_master
39 | - kube_node
40 | tasks:
41 | - name: starting kube_node services
42 | service: name={{ item }} state=started enabled=yes
43 | with_items:
44 | - kubelet
45 | - kube-proxy
46 |
--------------------------------------------------------------------------------
/playbooks/99.clean.yml:
--------------------------------------------------------------------------------
1 | # WARNING: This playbook will erase the entire k8s cluster, including Pods, etcd data, etc.
2 | # Make sure you know what you are doing.
3 |
4 | - hosts:
5 | - kube_master
6 | - kube_node
7 | - ex_lb
8 | - etcd
9 | vars:
10 | DEL_MASTER: "yes"
11 | DEL_NODE: "yes"
12 | DEL_ETCD: "yes"
13 | DEL_LB: "yes"
14 | DEL_CHRONY: "yes"
15 | DEL_ENV: "yes"
16 | roles:
17 | - clean
18 |
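19 | # The DEL_* switches default to "no" in roles/clean/defaults/main.yml, so a partial
20 | # clean is possible by overriding individual switches, e.g. (illustrative inventory path):
21 | #   ansible-playbook -i clusters/<cluster>/hosts playbooks/99.clean.yml -e DEL_ETCD=no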
--------------------------------------------------------------------------------
/roles/calico/templates/bgp-default.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: projectcalico.org/v3
2 | kind: BGPConfiguration
3 | metadata:
4 | name: default
5 | spec:
6 | logSeverityScreen: Info
7 | nodeToNodeMeshEnabled: false
8 | asNumber: {{ CALICO_AS_NUMBER }}
9 |
--------------------------------------------------------------------------------
/roles/calico/templates/bgp-rr.yaml.j2:
--------------------------------------------------------------------------------
1 | kind: BGPPeer
2 | apiVersion: projectcalico.org/v3
3 | metadata:
4 | name: peer-with-route-reflectors
5 | spec:
6 | nodeSelector: all()
7 | peerSelector: route-reflector == 'true'
8 |
--------------------------------------------------------------------------------
/roles/calico/templates/calico-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "calico",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "k8s",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/calico/templates/calicoctl.cfg.j2:
--------------------------------------------------------------------------------
1 | apiVersion: projectcalico.org/v3
2 | kind: CalicoAPIConfig
3 | metadata:
4 | spec:
5 | datastoreType: "etcdv3"
6 | etcdEndpoints: {{ ETCD_ENDPOINTS }}
7 | etcdKeyFile: /etc/calico/ssl/calico-key.pem
8 | etcdCertFile: /etc/calico/ssl/calico.pem
9 | etcdCACertFile: {{ ca_dir }}/ca.pem
10 |
--------------------------------------------------------------------------------
/roles/calico/vars/main.yml:
--------------------------------------------------------------------------------
1 | # etcd cluster endpoints list, generated automatically from the members of the 'etcd' group
2 | TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
3 | ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
4 |
5 | # calico AS number
6 | CALICO_AS_NUMBER: 64512
7 |
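8 | # Example: with groups['etcd'] = ['192.168.1.1', '192.168.1.2'] the two variables
9 | # above render ETCD_ENDPOINTS as "https://192.168.1.1:2379,https://192.168.1.2:2379".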
--------------------------------------------------------------------------------
/roles/chrony/chrony.yml:
--------------------------------------------------------------------------------
1 | - hosts:
2 | - kube_master
3 | - kube_node
4 | - etcd
5 | - ex_lb
6 | - chrony
7 | roles:
8 | - { role: chrony, when: "groups['chrony']|length > 0" }
9 |
--------------------------------------------------------------------------------
/roles/chrony/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Upstream NTP servers (IMPORTANT: time must be kept in sync across all cluster nodes)
2 | ntp_servers:
3 | - "ntp1.aliyun.com"
4 | - "time1.cloud.tencent.com"
5 | - "0.cn.pool.ntp.org"
6 |
7 | # Networks allowed to sync time from the internal chrony server, e.g. "10.0.0.0/8"; all networks are allowed by default
8 | local_network: "0.0.0.0/0"
9 |
--------------------------------------------------------------------------------
/roles/chrony/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: prepare some dirs
2 | file: name={{ item }} state=directory
3 | with_items:
4 | - "/etc/chrony"
5 | - "/var/lib/chrony"
6 | - "/var/log/chrony"
7 |
8 | - name: 卸载 ntp
9 | package: name=ntp state=absent
10 | ignore_errors: true
11 |
12 | - name: 下载二进制文件chronyd
13 | copy: src={{ base_dir }}/bin/chronyd dest=/usr/sbin/chronyd mode=0755
14 |
15 | - name: 创建chronyd的systemd unit文件
16 | template: src=chronyd.service.j2 dest=/etc/systemd/system/chronyd.service
17 |
18 | - name: 配置 chrony server
19 | template: src=server.conf.j2 dest=/etc/chrony/chrony.conf
20 | when: 'inventory_hostname == groups.chrony[0]'
21 |
22 | - name: 配置 chrony client
23 | template: src=client.conf.j2 dest=/etc/chrony/chrony.conf
24 | when: 'inventory_hostname != groups.chrony[0]'
25 |
26 | - name: 开机启用chronyd服务
27 | shell: systemctl disable chronyd && systemctl enable chronyd
28 | ignore_errors: true
29 |
30 | - name: 开启chronyd服务
31 | shell: systemctl daemon-reload && systemctl restart chronyd
32 | ignore_errors: true
33 | tags: restart_chronyd
34 |
35 | - name: 以轮询的方式等待chronyd服务启动
36 | shell: "systemctl is-active chronyd.service"
37 | register: svc_status
38 | until: '"active" in svc_status.stdout'
39 | retries: 3
40 | delay: 3
41 | tags: restart_chronyd
42 |
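43 | # Once chronyd is active, synchronisation can be checked on any node where the
44 | # 'chronyc' client is available (it is not shipped by this role), for example:
45 | #   chronyc sources -v
46 | #   chronyc tracking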
--------------------------------------------------------------------------------
/roles/chrony/templates/chronyd.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=chrony, an NTP client/server
3 | Documentation=https://chrony.tuxfamily.org/documentation.html
4 | Conflicts=systemd-timesyncd.service openntpd.service ntpd.service ntp.service ntpsec.service
5 | After=network.target
6 | ConditionCapability=CAP_SYS_TIME
7 |
8 | [Service]
9 | # sysctl net.netfilter.nf_conntrack_count
10 | Type=forking
11 | PIDFile=/var/run/chrony/chronyd.pid
12 | ExecStart=/usr/sbin/chronyd -f /etc/chrony/chrony.conf
13 | ExecStartPost=/sbin/iptables -t raw -A PREROUTING -p udp -m udp --dport 123 -j NOTRACK
14 | ExecStartPost=/sbin/iptables -t raw -A OUTPUT -p udp -m udp --sport 123 -j NOTRACK
15 | PrivateTmp=yes
16 | ProtectHome=yes
17 | ProtectSystem=full
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/roles/chrony/templates/client.conf.j2:
--------------------------------------------------------------------------------
1 | # Use local server
2 | server {{ groups['chrony'][0] }} iburst
3 |
4 | # Record the rate at which the system clock gains/loses time.
5 | driftfile /var/lib/chrony/drift
6 |
7 | # Allow the system clock to be stepped in the first three updates
8 | # if its offset is larger than 1 second.
9 | makestep 1.0 3
10 |
11 | # This directive enables kernel synchronisation (every 11 minutes) of the
12 | # real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
13 | rtcsync
14 |
15 | # Specify directory for dumping measurements.
16 | dumpdir /var/lib/chrony
17 |
18 | # This directive designates subnets (or nodes) from which NTP clients are allowed
19 | # to access 'chronyd'.
20 | allow {{ local_network }}
21 |
22 | # Stop bad estimates upsetting machine clock.
23 | maxupdateskew 100.0
24 |
25 | # Ignore source stratum level
26 | stratumweight 0
27 |
28 | # Comment this line out to turn off logging.
29 | #log tracking measurements statistics
30 | logdir /var/log/chrony
31 | log statistics measurements tracking
32 | noclientlog
33 |
--------------------------------------------------------------------------------
/roles/chrony/templates/server.conf.j2:
--------------------------------------------------------------------------------
1 | # Use public servers from the pool.ntp.org project.
2 | {% for HOST in ntp_servers %}
3 | server {{ HOST }} iburst
4 | {% endfor %}
5 | pool pool.ntp.org iburst
6 | pool 2.debian.pool.ntp.org iburst
7 |
8 | # Record the rate at which the system clock gains/loses time.
9 | driftfile /var/lib/chrony/drift
10 |
11 | # Allow the system clock to be stepped in the first three updates
12 | # if its offset is larger than 1 second.
13 | makestep 1.0 3
14 |
15 | # This directive enables kernel synchronisation (every 11 minutes) of the
16 | # real-time clock. Note that it can’t be used along with the 'rtcfile' directive.
17 | rtcsync
18 |
19 | # Specify directory for dumping measurements.
20 | dumpdir /var/lib/chrony
21 |
22 | # This directive lets 'chronyd' serve time even if it is not synchronised to any NTP server.
23 | local stratum 10
24 |
25 | # This directive designates subnets (or nodes) from which NTP clients are allowed
26 | # to access 'chronyd'.
27 | allow {{ local_network }}
28 |
29 | # Stop bad estimates upsetting machine clock.
30 | maxupdateskew 100.0
31 |
32 | # Ignore source stratum level
33 | stratumweight 0
34 |
35 | # Comment this line out to turn off logging.
36 | #log tracking measurements statistics
37 | logdir /var/log/chrony
38 | log statistics measurements tracking
39 | noclientlog
40 |
--------------------------------------------------------------------------------
/roles/cilium/cilium.yml:
--------------------------------------------------------------------------------
1 | - hosts:
2 | - kube_master
3 | - kube_node
4 | roles:
5 | - cilium
6 |
--------------------------------------------------------------------------------
/roles/cilium/files/cilium-1.16.3.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cilium/files/cilium-1.16.3.tgz
--------------------------------------------------------------------------------
/roles/cilium/files/star_war_example/http-sw-app.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: deathstar
6 | spec:
7 | type: ClusterIP
8 | ports:
9 | - port: 80
10 | selector:
11 | org: empire
12 | class: deathstar
13 | ---
14 | apiVersion: apps/v1
15 | kind: Deployment
16 | metadata:
17 | name: deathstar
18 | spec:
19 | replicas: 2
20 | selector:
21 | matchLabels:
22 | org: empire
23 | class: deathstar
24 | template:
25 | metadata:
26 | labels:
27 | org: empire
28 | class: deathstar
29 | spec:
30 | containers:
31 | - name: deathstar
32 | image: docker.io/cilium/starwars
33 | ---
34 | apiVersion: v1
35 | kind: Pod
36 | metadata:
37 | name: tiefighter
38 | labels:
39 | org: empire
40 | class: tiefighter
41 | spec:
42 | containers:
43 | - name: spaceship
44 | image: docker.io/tgraf/netperf
45 | ---
46 | apiVersion: v1
47 | kind: Pod
48 | metadata:
49 | name: xwing
50 | labels:
51 | org: alliance
52 | class: xwing
53 | spec:
54 | containers:
55 | - name: spaceship
56 | image: docker.io/tgraf/netperf
57 |
--------------------------------------------------------------------------------
/roles/cilium/files/star_war_example/sw_l3_l4_l7_policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "cilium.io/v2"
2 | kind: CiliumNetworkPolicy
3 | description: "L7 policy to restrict access to specific HTTP call"
4 | metadata:
5 | name: "rule1"
6 | spec:
7 | endpointSelector:
8 | matchLabels:
9 | org: empire
10 | class: deathstar
11 | ingress:
12 | - fromEndpoints:
13 | - matchLabels:
14 | org: empire
15 | toPorts:
16 | - ports:
17 | - port: "80"
18 | protocol: TCP
19 | rules:
20 | http:
21 | - method: "POST"
22 | path: "/v1/request-landing"
23 |
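24 | # The policy can be exercised with the pods from http-sw-app.yaml (as in the
25 | # upstream Cilium Star Wars demo, assuming the default namespace):
26 | #   kubectl exec tiefighter -- curl -s -XPOST deathstar.default.svc.cluster.local/v1/request-landing   # allowed
27 | #   kubectl exec tiefighter -- curl -s -XPUT deathstar.default.svc.cluster.local/v1/exhaust-port       # denied by the L7 rule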
--------------------------------------------------------------------------------
/roles/cilium/files/star_war_example/sw_l3_l4_policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "cilium.io/v2"
2 | kind: CiliumNetworkPolicy
3 | description: "L3-L4 policy to restrict deathstar access to empire ships only"
4 | metadata:
5 | name: "rule1"
6 | spec:
7 | endpointSelector:
8 | matchLabels:
9 | org: empire
10 | class: deathstar
11 | ingress:
12 | - fromEndpoints:
13 | - matchLabels:
14 | org: empire
15 | toPorts:
16 | - ports:
17 | - port: "80"
18 | protocol: TCP
19 |
--------------------------------------------------------------------------------
/roles/clean/clean_node.yml:
--------------------------------------------------------------------------------
1 | - hosts: "{{ NODE_TO_CLEAN }}"
2 | roles:
3 | - clean
4 |
--------------------------------------------------------------------------------
/roles/clean/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # whether to remove kube_master related services
2 | DEL_MASTER: "no"
3 |
4 | # whether to remove kube_node related services
5 | DEL_NODE: "no"
6 |
7 | # whether to remove etcd related services
8 | DEL_ETCD: "no"
9 |
10 | # whether to remove lb related services
11 | DEL_LB: "no"
12 |
13 | # whether to remove chrony related services
14 | DEL_CHRONY: "no"
15 |
16 | # whether to remove kubeasz environment variables
17 | DEL_ENV: "no"
18 |
--------------------------------------------------------------------------------
/roles/clean/tasks/clean_chrony.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: stop and disable chronyd
3 | service: name=chronyd state=stopped enabled=no
4 | ignore_errors: true
5 |
6 | - name: remove files and dirs
7 | file: name={{ item }} state=absent
8 | with_items:
9 | - "/etc/chrony"
10 | - "/var/lib/chrony"
11 | - "/var/log/chrony"
12 | - "/var/run/chrony"
13 | - "/etc/systemd/system/chronyd.service"
14 | ignore_errors: true
15 | when: "groups['chrony']|length > 0"
16 |
--------------------------------------------------------------------------------
/roles/clean/tasks/clean_etcd.yml:
--------------------------------------------------------------------------------
1 | # to clean 'etcd' nodes
2 | - block:
3 | - name: stop and disable etcd service
4 | service:
5 | name: etcd
6 | state: stopped
7 | enabled: no
8 | ignore_errors: true
9 |
10 | - name: remove files and dirs
11 | file: name={{ item }} state=absent
12 | with_items:
13 | - "{{ ETCD_DATA_DIR }}"
14 | - "{{ ETCD_WAL_DIR }}"
15 | - "/backup/k8s"
16 | - "/etc/systemd/system/etcd.service"
17 | ignore_errors: true
18 | when: "inventory_hostname in groups['etcd']"
19 |
--------------------------------------------------------------------------------
/roles/clean/tasks/clean_lb.yml:
--------------------------------------------------------------------------------
1 | # to clean 'lb' service
2 | - block:
3 | - name: get service info
4 | shell: 'systemctl list-units --type=service |grep -E "l4lb|keepalived|ssh"'
5 | register: service_info
6 |
7 | - name: remove service l4lb
8 | service: name=l4lb state=stopped enabled=no
9 | when: '"l4lb" in service_info.stdout'
10 | ignore_errors: true
11 |
12 | - name: remove service keepalived
13 | service: name=keepalived state=stopped enabled=no
14 | when: '"keepalived" in service_info.stdout'
15 | ignore_errors: true
16 |
17 | - name: remove files and dirs
18 | file: name={{ item }} state=absent
19 | with_items:
20 | - "/etc/l4lb"
21 | - "/etc/keepalived"
22 | - "/etc/systemd/system/l4lb.service"
23 | - "/etc/systemd/system/keepalived.service"
24 | - "/usr/local/sbin/keepalived"
25 | ignore_errors: true
26 | when: "inventory_hostname in groups['ex_lb']"
27 |
--------------------------------------------------------------------------------
/roles/clean/tasks/clean_master.yml:
--------------------------------------------------------------------------------
1 | # to clean 'kube_master' nodes
2 | - name: stop and disable kube_master service
3 | service: name={{ item }} state=stopped enabled=no
4 | with_items:
5 | - kube-apiserver
6 | - kube-controller-manager
7 | - kube-scheduler
8 | ignore_errors: true
9 | when: "inventory_hostname in groups['kube_master']"
10 |
11 | - name: remove files and dirs of 'kube_master' nodes
12 | file: name={{ item }} state=absent
13 | with_items:
14 | - "/var/run/kubernetes"
15 | - "/etc/systemd/system/kube-apiserver.service"
16 | - "/etc/systemd/system/kube-controller-manager.service"
17 | - "/etc/systemd/system/kube-scheduler.service"
18 | ignore_errors: true
19 | when: "inventory_hostname in groups['kube_master']"
20 |
--------------------------------------------------------------------------------
/roles/clean/tasks/main.yml:
--------------------------------------------------------------------------------
1 | #
2 | - import_tasks: clean_etcd.yml
3 | when: 'DEL_ETCD == "yes"'
4 |
5 | - import_tasks: clean_master.yml
6 | when: 'DEL_MASTER == "yes"'
7 |
8 | - import_tasks: clean_node.yml
9 | when: 'DEL_NODE == "yes"'
10 |
11 | - import_tasks: clean_lb.yml
12 | when: 'DEL_LB == "yes"'
13 |
14 | - import_tasks: clean_chrony.yml
15 | when: 'DEL_CHRONY == "yes"'
16 |
17 | - name: clean 'ENV PATH'
18 | lineinfile:
19 | dest: ~/.bashrc
20 | state: absent
21 | regexp: '{{ item }}'
22 | with_items:
23 | - 'kubeasz'
24 | - 'helm completion'
25 | - 'kubectl completion'
26 | - 'crictl completion'
27 | - 'HELM_TLS_ENABLE'
28 | when: 'DEL_ENV == "yes"'
29 |
30 | - name: 删除 k8s_nodename 在节点的 /etc/hosts 地址解析
31 | blockinfile:
32 | path: /etc/hosts
33 | state: absent
34 | marker: "### {mark} KUBEASZ MANAGED BLOCK"
35 |
36 | #- name: remove binaries
37 | # file: name={{ item }} state=absent
38 | # with_items:
39 | # - "/opt/kube/bin"
40 | # when: 'DEL_ETCD == "yes" and DEL_NODE == "yes" and DEL_MASTER == "yes"'
41 |
42 | - name: 重启提示 WARNING
43 |   debug:
44 |     msg: "[重要]: 请重启节点以确保清除系统残留的虚拟网卡、路由信息、iptables|ipvs规则等 \
45 |          [IMPORTANT]: please reboot the nodes to make sure leftover network interfaces, routes and iptables/ipvs rules are cleaned up"
46 | when: 'DEL_ETCD == "yes" and DEL_NODE == "yes" and DEL_MASTER == "yes"'
47 |
--------------------------------------------------------------------------------
/roles/cluster-addon/files/elasticsearch-0.9.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/elasticsearch-0.9.1.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/kube-prometheus-stack-45.23.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/kube-prometheus-stack-45.23.0.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/kubeapps-12.4.3.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/kubeapps-12.4.3.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/kubeblocks-0.9.3.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/kubeblocks-0.9.3.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/kubernetes-dashboard-7.12.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/kubernetes-dashboard-7.12.0.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/minio-0.9.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/minio-0.9.0.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/mongodb-0.9.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/mongodb-0.9.1.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/mysql-0.9.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/mysql-0.9.1.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/postgresql-0.9.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/postgresql-0.9.0.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/files/redis-0.9.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easzlab/kubeasz/eff9793d7d8b38a3d5b93405a6f2cb324b1caa75/roles/cluster-addon/files/redis-0.9.1.tgz
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/cilium_connectivity_check.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备 cilium-check 配置目录
3 | file: name={{ cluster_dir }}/yml/cilium-check state=directory
4 |
5 | - name: 准备部署文件
6 | template: src=cilium-check/{{ item }}.j2 dest={{ cluster_dir }}/yml/cilium-check/{{ item }}
7 | with_items:
8 | - "connectivity-check.yaml"
9 | - "check-part1.yaml"
10 | - "namespace.yaml"
11 |
12 | - name: 创建测试namespace
13 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/cilium-check/namespace.yaml"
14 |
15 | - name: 创建测试part1
16 | shell: "{{ base_dir }}/bin/kubectl apply -n cilium-test -f {{ cluster_dir }}/yml/cilium-check/check-part1.yaml"
17 |
18 | - name: 轮询等待echo pod运行,视下载镜像速度而定
19 | shell: "{{ base_dir }}/bin/kubectl get pod -n cilium-test |grep echo|grep Running|grep '1/1'|wc -l"
20 | register: pod_status
21 | until: pod_status.stdout == "3"
22 | retries: 15
23 | delay: 8
24 | ignore_errors: true
25 |
26 | - name: 创建完整测试connectivity-check
27 | shell: "{{ base_dir }}/bin/kubectl apply -n cilium-test -f {{ cluster_dir }}/yml/cilium-check/connectivity-check.yaml"
28 |
29 | - debug:
30 | msg: "[重要]: 请查看命名空间cilium-test下所有pod,如果均为Running状态,且没有重启数增长,说明cilium连接测试正常。 \
31 | 测试观察一段时间可以整体删除该命名空间所有资源(kubectl delete ns cilium-test)"
32 | when: 'cilium_connectivity_check|bool'
33 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/coredns.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备 DNS的部署文件
3 | template: src=dns/coredns.yaml.j2 dest={{ cluster_dir }}/yml/coredns.yaml
4 |
5 | - name: 删除coredns部署
6 | shell: "{{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/coredns.yaml || echo true; sleep 3"
7 | tags: force_change_certs
8 | when: 'CHANGE_CA|bool'
9 |
10 | - name: 创建coredns部署
11 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/coredns.yaml"
12 | tags: force_change_certs
13 | when: 'dns_install == "yes"'
14 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/dashboard.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: prepare some dirs
3 | file: name={{ cluster_dir }}/yml/dashboard state=directory
4 |
5 | - name: 准备 dashboard的部署文件
6 | template: src=dashboard/{{ item }}.j2 dest={{ cluster_dir }}/yml/dashboard/{{ item }}
7 | with_items:
8 | - "dashboard-values.yaml"
9 | - "admin-user-sa-rbac.yaml"
10 | - "read-user-sa-rbac.yaml"
11 |
12 | - name: 创建 dashboard部署
13 | shell: "{{ base_dir }}/bin/helm upgrade kubernetes-dashboard --install --create-namespace \
14 | -n kube-system -f {{ cluster_dir }}/yml/dashboard/dashboard-values.yaml \
15 | {{ base_dir }}/roles/cluster-addon/files/kubernetes-dashboard-{{ dashboardVer }}.tgz"
16 |
17 | - name: 创建用户admin,read
18 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/dashboard/admin-user-sa-rbac.yaml \
19 | && {{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/dashboard/read-user-sa-rbac.yaml"
20 |
21 | when: 'dashboard_install == "yes"'
22 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/kubeapps.yml:
--------------------------------------------------------------------------------
1 | # https://github.com/bitnami/charts/tree/main/bitnami/kubeapps
2 |
3 | - block:
4 | - name: prepare some dirs
5 | file: name={{ cluster_dir }}/yml/kubeapps/token state=directory
6 |
7 | - name: 创建 kubeapps chart 个性化设置
8 | template: src=kubeapps/values.yaml.j2 dest={{ cluster_dir }}/yml/kubeapps/values.yaml
9 |
10 | - name: 准备临时用户tokens
11 | template: src=kubeapps/{{ item }}.j2 dest={{ cluster_dir }}/yml/kubeapps/token/{{ item }}
12 | with_items:
13 | - "kubeapps-admin-token.yaml"
14 | - "single-namespace-edit-token.yaml"
15 | - "single-namespace-view-token.yaml"
16 |
17 | - name: helm 创建 kubeapps
18 | shell: "{{ base_dir }}/bin/helm upgrade kubeapps --install --create-namespace \
19 | -n {{ kubeapps_install_namespace }} -f {{ cluster_dir }}/yml/kubeapps/values.yaml \
20 | {{ base_dir }}/roles/cluster-addon/files/kubeapps-{{ kubeapps_chart_ver }}.tgz"
21 |
22 | - name: 创建临时用户tokens
23 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/kubeapps/token/"
24 | when: 'kubeapps_install == "yes"'
25 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/local-storage.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备 local-storage 配置目录
3 | file: name={{ cluster_dir }}/yml/local-storage state=directory
4 |
5 | - name: 准备 local-storage部署文件
6 | template: src=local-storage/{{ item }}.j2 dest={{ cluster_dir }}/yml/local-storage/{{ item }}
7 | with_items:
8 | - "local-path-storage.yaml"
9 | - "test-pod.yaml"
10 |
11 | - name: 创建 local-storage部署
12 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/local-storage/local-path-storage.yaml"
13 | when: 'local_path_provisioner_install == "yes" or (kubeapps_install == "yes" and kubeapps_storage_class == "local-path")'
14 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: 获取所有已经创建的POD信息
2 | command: "{{ base_dir }}/bin/kubectl get pod --all-namespaces"
3 | register: pod_info
4 | tags: force_change_certs
5 |
6 | - import_tasks: coredns.yml
7 | when: '"coredns" not in pod_info.stdout or CHANGE_CA|bool'
8 |
9 | - import_tasks: nodelocaldns.yml
10 | when: '"node-local-dns" not in pod_info.stdout or CHANGE_CA|bool'
11 |
12 | - import_tasks: metrics-server.yml
13 | when: '"metrics-server" not in pod_info.stdout or CHANGE_CA|bool'
14 |
15 | - import_tasks: dashboard.yml
16 | when: '"kubernetes-dashboard" not in pod_info.stdout or CHANGE_CA|bool'
17 |
18 | - import_tasks: prometheus.yml
19 | when: 'prom_install == "yes"'
20 |
21 | - import_tasks: local-storage.yml
22 | when: '"local-path-provisioner" not in pod_info.stdout or CHANGE_CA|bool'
23 |
24 | - import_tasks: nfs-provisioner.yml
25 | when: '"nfs-client-provisioner" not in pod_info.stdout or CHANGE_CA|bool'
26 |
27 | - import_tasks: cilium_connectivity_check.yml
28 | when: 'CLUSTER_NETWORK == "cilium" and network_check_enabled|bool'
29 |
30 | - import_tasks: network_check.yml
31 | when: 'network_check_enabled|bool and CLUSTER_NETWORK != "cilium"'
32 |
33 | - import_tasks: kubeapps.yml
34 | when: 'kubeapps_install == "yes"'
35 |
36 | - import_tasks: kubeblocks.yml
37 | when: 'kubeblocks_install == "yes"'
38 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/metrics-server.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备 metrics-server的部署文件
3 | template: src=metrics-server/components.yaml.j2 dest={{ cluster_dir }}/yml/metrics-server.yaml
4 |
5 | - name: 删除 metrics-server部署
6 | shell: "{{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/metrics-server.yaml || echo true; sleep 3"
7 | tags: force_change_certs
8 | when: 'CHANGE_CA|bool'
9 |
10 | - name: 创建 metrics-server部署
11 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/metrics-server.yaml"
12 | tags: force_change_certs
13 | when: 'metricsserver_install == "yes"'
14 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/network_check.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备 network-check 配置目录
3 | file: name={{ cluster_dir }}/yml/network-check state=directory
4 |
5 | - name: 准备部署文件
6 | template: src=network-check/{{ item }}.j2 dest={{ cluster_dir }}/yml/network-check/{{ item }}
7 | with_items:
8 | - "network-check.yaml"
9 | - "namespace.yaml"
10 |
11 | - name: 创建测试namespace
12 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/network-check/namespace.yaml"
13 |
14 | - name: 创建完整测试network-check
15 | shell: "{{ base_dir }}/bin/kubectl apply -n network-test -f {{ cluster_dir }}/yml/network-check/network-check.yaml"
16 |
17 | - debug:
18 | msg: "[重要]: 请查看命名空间network-test下所有pod,如果均为Completed状态,且没有重启数增长,说明网络连接测试正常。 \
19 | 如果有Pending状态,部分测试需要多节点集群才能完成,如果希望禁用网络测试执行(kubectl delete ns network-test)"
20 | when: 'network_check_enabled|bool'
21 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/nfs-provisioner.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备 nfs-provisioner 配置目录
3 | file: name={{ cluster_dir }}/yml/nfs-provisioner state=directory
4 |
5 | - name: 准备 nfs-provisioner部署文件
6 | template: src=nfs-provisioner/{{ item }}.j2 dest={{ cluster_dir }}/yml/nfs-provisioner/{{ item }}
7 | with_items:
8 | - "nfs-provisioner.yaml"
9 | - "test-pod.yaml"
10 |
11 | - name: 创建 nfs-provisioner部署
12 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nfs-provisioner/nfs-provisioner.yaml"
13 | when: 'nfs_provisioner_install == "yes"'
14 |
--------------------------------------------------------------------------------
/roles/cluster-addon/tasks/nodelocaldns.yml:
--------------------------------------------------------------------------------
1 | - block:
2 | - name: 准备dnscache的部署文件
3 | template: src=dns/nodelocaldns-ipvs.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
4 | when: "PROXY_MODE == 'ipvs'"
5 |
6 | - name: 准备dnscache的部署文件
7 | template: src=dns/nodelocaldns-iptables.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
8 | when: "PROXY_MODE == 'iptables'"
9 |
10 | - name: 删除dnscache部署
11 | shell: "{{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/nodelocaldns.yaml || echo true; sleep 3"
12 | tags: force_change_certs
13 | when: 'CHANGE_CA|bool'
14 |
15 | - name: 创建dnscache部署
16 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nodelocaldns.yaml"
17 | tags: force_change_certs
18 | when: 'ENABLE_LOCAL_DNS_CACHE|bool'
19 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/cilium-check/namespace.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | kubernetes.io/metadata.name: cilium-test
6 | name: cilium-test
7 | spec:
8 | finalizers:
9 | - kubernetes
10 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/dashboard/admin-user-sa-rbac.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: admin-user
5 | namespace: kube-system
6 |
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRoleBinding
10 | metadata:
11 | name: admin-user
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: cluster-admin
16 | subjects:
17 | - kind: ServiceAccount
18 | name: admin-user
19 | namespace: kube-system
20 |
21 | ---
22 | apiVersion: v1
23 | kind: Secret
24 | type: kubernetes.io/service-account-token
25 | metadata:
26 | name: admin-user
27 | namespace: kube-system
28 | annotations:
29 | kubernetes.io/service-account.name: "admin-user"
30 |
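31 | # The login token for 'admin-user' can then be read from the Secret above, e.g.:
32 | #   kubectl -n kube-system get secret admin-user -o jsonpath='{.data.token}' | base64 -d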
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/dashboard/dashboard-values.yaml.j2:
--------------------------------------------------------------------------------
1 | # General configuration shared across resources
2 | app:
3 | mode: 'dashboard'
4 |
5 | auth:
6 | image:
7 | repository: easzlab.io.local:5000/kubernetesui/dashboard-auth
8 |
9 | api:
10 | image:
11 | repository: easzlab.io.local:5000/kubernetesui/dashboard-api
12 |
13 | web:
14 | image:
15 | repository: easzlab.io.local:5000/kubernetesui/dashboard-web
16 |
17 | metricsScraper:
18 | image:
19 | repository: easzlab.io.local:5000/kubernetesui/dashboard-metrics-scraper
20 |
21 | ## Required Kong sub-chart with DBless configuration to act as a gateway
22 | kong:
23 | enabled: true
24 | image:
25 | repository: easzlab.io.local:5000/kong
26 | proxy:
27 | type: NodePort
28 | http:
29 | enabled: false
30 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeapps/kubeapps-admin-token.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: kubeapps-operator
6 | namespace: kube-system
7 |
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRoleBinding
11 | metadata:
12 | name: kubeapps-operator
13 | roleRef:
14 | apiGroup: rbac.authorization.k8s.io
15 | kind: ClusterRole
16 | name: cluster-admin
17 | subjects:
18 | - kind: ServiceAccount
19 | name: kubeapps-operator
20 | namespace: kube-system
21 |
22 | ---
23 | apiVersion: v1
24 | kind: Secret
25 | metadata:
26 | name: kubeapps-admin-token
27 | namespace: kube-system
28 | annotations:
29 | kubernetes.io/service-account.name: "kubeapps-operator"
30 | type: kubernetes.io/service-account-token
31 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeapps/single-namespace-edit-token.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: kubeapps-editor
6 | namespace: {{ kubeapps_working_namespace }}
7 |
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: RoleBinding
11 | metadata:
12 | name: kubeapps-editor
13 | namespace: {{ kubeapps_working_namespace }}
14 | roleRef:
15 | apiGroup: rbac.authorization.k8s.io
16 | kind: ClusterRole
17 | name: edit
18 | subjects:
19 | - kind: ServiceAccount
20 | name: kubeapps-editor
21 | namespace: {{ kubeapps_working_namespace }}
22 |
23 | ---
24 | apiVersion: v1
25 | kind: Secret
26 | metadata:
27 | name: kubeapps-edit-token
28 | namespace: {{ kubeapps_working_namespace }}
29 | annotations:
30 | kubernetes.io/service-account.name: "kubeapps-editor"
31 | type: kubernetes.io/service-account-token
32 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeapps/single-namespace-view-token.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: kubeapps-viewer
6 | namespace: {{ kubeapps_working_namespace }}
7 |
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: RoleBinding
11 | metadata:
12 | name: kubeapps-viewer
13 | namespace: {{ kubeapps_working_namespace }}
14 | roleRef:
15 | apiGroup: rbac.authorization.k8s.io
16 | kind: ClusterRole
17 | name: view
18 | subjects:
19 | - kind: ServiceAccount
20 | name: kubeapps-viewer
21 | namespace: {{ kubeapps_working_namespace }}
22 |
23 | ---
24 | apiVersion: v1
25 | kind: Secret
26 | metadata:
27 | name: kubeapps-view-token
28 | namespace: {{ kubeapps_working_namespace }}
29 | annotations:
30 | kubernetes.io/service-account.name: "kubeapps-viewer"
31 | type: kubernetes.io/service-account-token
32 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeblocks/kb-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ## @section Common parameters
2 |
3 | ## KubeBlocks container image settings
4 | image:
5 | registry: easzlab.io.local:5000
6 |
7 | ## @param replicaCount
8 | replicaCount: 1
9 |
10 | ## Data protection settings
11 | dataProtection:
12 | enabled: true
13 | enableBackupEncryption: false
14 | image:
15 | repository: apecloud/kubeblocks-dataprotection
16 |
17 | ## @param addonController.jobImagePullPolicy - addon install job image pull policy.
18 | addonController:
19 | enabled: true
20 | jobTTL: "5m"
21 |
22 | ## To avoid upgrading the addon CRs when upgrading this chart, set this value to false.
23 | upgradeAddons: false
24 |
25 | ## @param autoInstalledAddons - the list of auto-installed addons when install and upgrade.
26 | autoInstalledAddons:
27 | - "mongodb"
28 | - "mysql"
29 | - "postgresql"
30 | - "redis"
31 | - "snapshot-controller"
32 |
33 |
34 | ## Prometheus Addon
35 | prometheus:
36 | enabled: false
37 |
38 | grafana:
39 | enabled: false
40 |
41 | controllers:
42 | apps:
43 | enabled: true
44 | workloads:
45 | enabled: true
46 | experimental:
47 | enabled: false
48 |
49 | featureGates:
50 | ignoreConfigTemplateDefaultMode:
51 | enabled: false
52 | ignorePodVerticalScaling:
53 | enabled: false
54 | componentReplicasAnnotation:
55 | enabled: true
56 | inPlacePodVerticalScaling:
57 | enabled: false
58 | noRSMEnv:
59 | enabled: false
60 |
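The role presumably feeds this rendered values file to the KubeBlocks Helm chart; a minimal manual sketch of how such a file is consumed (repo URL, chart name and the kb-system namespace are the upstream defaults, not necessarily what the role uses, and the role pulls images from the local registry configured above):
  helm repo add kubeblocks https://apecloud.github.io/helm-charts && helm repo update
  helm upgrade --install kubeblocks kubeblocks/kubeblocks \
    -n kb-system --create-namespace -f kb-values.yaml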
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeblocks/minio-cluster.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps.kubeblocks.io/v1alpha1
2 | kind: Cluster
3 | metadata:
4 | name: minio-cluster
5 | spec:
6 | terminationPolicy: Delete
7 | componentSpecs:
8 | - name: minio
9 | componentDef: minio
10 | env:
11 | - name: MINIO_BUCKETS
12 | value: "test-bucket,prod-bucket"
13 | replicas: 4
14 | resources:
15 | limits:
16 | cpu: '0.5'
17 | memory: 0.5Gi
18 | requests:
19 | cpu: '0.5'
20 | memory: 0.5Gi
21 | volumeClaimTemplates:
22 | - name: data
23 | spec:
24 | storageClassName: "local-path"
25 | accessModes:
26 | - ReadWriteOnce
27 | resources:
28 | requests:
29 | storage: 10Gi
30 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeblocks/mongodb-cluster.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps.kubeblocks.io/v1alpha1
2 | kind: Cluster
3 | metadata:
4 | name: mongodb-cluster
5 | spec:
6 | terminationPolicy: Delete
7 | affinity:
8 | podAntiAffinity: Preferred
9 | topologyKeys:
10 | - kubernetes.io/hostname
11 | tolerations:
12 | - key: kb-data
13 | operator: Equal
14 | value: 'true'
15 | effect: NoSchedule
16 | componentSpecs:
17 | - name: mongodb
18 | componentDef: mongodb
19 | replicas: 3
20 | resources:
21 | limits:
22 | cpu: '0.5'
23 | memory: 0.5Gi
24 | requests:
25 | cpu: '0.5'
26 | memory: 0.5Gi
27 | volumeClaimTemplates:
28 | - name: data
29 | spec:
30 | storageClassName: "local-path"
31 | accessModes:
32 | - ReadWriteOnce
33 | resources:
34 | requests:
35 | storage: 10Gi
36 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/kubeblocks/pg-cluster.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps.kubeblocks.io/v1alpha1
2 | kind: Cluster
3 | metadata:
4 | name: pg-cluster
5 | spec:
6 | terminationPolicy: Delete
7 | componentSpecs:
8 | - name: postgresql
9 | componentDef: postgresql-16
10 | enabledLogs:
11 | - running
12 | disableExporter: true
13 | affinity:
14 | podAntiAffinity: Preferred
15 | topologyKeys:
16 | - kubernetes.io/hostname
17 | tenancy: SharedNode
18 | tolerations:
19 | - key: kb-data
20 | operator: Equal
21 | value: 'true'
22 | effect: NoSchedule
23 | replicas: 2
24 | resources:
25 | limits:
26 | cpu: '0.5'
27 | memory: 0.5Gi
28 | requests:
29 | cpu: '0.5'
30 | memory: 0.5Gi
31 | volumeClaimTemplates:
32 | - name: data
33 | spec:
34 | storageClassName: "local-path"
35 | accessModes:
36 | - ReadWriteOnce
37 | resources:
38 | requests:
39 | storage: 10Gi
40 |
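A quick way to check that a rendered Cluster manifest like this one comes up (the instance label is the one KubeBlocks normally attaches to its pods; adjust if it differs in your version):
  kubectl apply -f pg-cluster.yaml
  kubectl get clusters.apps.kubeblocks.io pg-cluster
  kubectl get pods -l app.kubernetes.io/instance=pg-cluster -o wide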
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/local-storage/test-pod.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: local-path-pvc
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | storageClassName: local-path
9 | resources:
10 | requests:
11 | storage: 128Mi
12 |
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: volume-test
18 | spec:
19 | containers:
20 | - name: volume-test
21 | image: nginx:stable-alpine
22 | imagePullPolicy: IfNotPresent
23 | volumeMounts:
24 | - name: volv
25 | mountPath: /data
26 | ports:
27 | - containerPort: 80
28 | volumes:
29 | - name: volv
30 | persistentVolumeClaim:
31 | claimName: local-path-pvc
32 |
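A short verification run for this test manifest; local-path uses WaitForFirstConsumer, so the PVC only binds once the pod is scheduled:
  kubectl apply -f test-pod.yaml
  kubectl get pvc local-path-pvc        # expect STATUS Bound after the pod starts
  kubectl exec volume-test -- sh -c 'echo ok > /data/test && cat /data/test'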
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/network-check/namespace.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | kubernetes.io/metadata.name: network-test
6 | name: network-test
7 | spec:
8 | finalizers:
9 | - kubernetes
10 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/nfs-provisioner/test-pod.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 | name: test-claim
6 | spec:
7 | storageClassName: {{ nfs_storage_class }}
8 | accessModes:
9 | - ReadWriteMany
10 | resources:
11 | requests:
12 | storage: 2Mi
13 |
14 | ---
15 | kind: Pod
16 | apiVersion: v1
17 | metadata:
18 | name: test-pod
19 | spec:
20 | containers:
21 | - name: test-pod
22 | image: busybox
23 | command:
24 | - "/bin/sh"
25 | args:
26 | - "-c"
27 | - "touch /mnt/SUCCESS && exit 0 || exit 1"
28 | volumeMounts:
29 | - name: nfs-pvc
30 | mountPath: "/mnt"
31 | restartPolicy: "Never"
32 | volumes:
33 | - name: nfs-pvc
34 | persistentVolumeClaim:
35 | claimName: test-claim
36 |
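A sketch of verifying the NFS provisioner with this manifest (the directory on the export follows the subdir provisioner's <namespace>-<pvc>-<pv> naming, so the exact path varies):
  kubectl apply -f test-pod.yaml
  kubectl get pvc test-claim            # expect Bound
  kubectl get pod test-pod              # expect Completed
  # on the NFS server, a SUCCESS file should appear under the exported directory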
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/prometheus/dingtalk-webhook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | run: dingtalk
7 | name: webhook-dingtalk
8 | namespace: monitor
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | run: dingtalk
17 | template:
18 | metadata:
19 | labels:
20 | run: dingtalk
21 | spec:
22 | containers:
23 | - name: dingtalk
24 | image: timonwong/prometheus-webhook-dingtalk:v0.3.0
25 | imagePullPolicy: IfNotPresent
26 |         # After setting up a DingTalk group custom robot, replace the xxxxxx part below with the real access_token
27 | args:
28 | - --ding.profile=webhook1=https://oapi.dingtalk.com/robot/send?access_token=xxxxxx
29 | ports:
30 | - containerPort: 8060
31 | protocol: TCP
32 |
33 | ---
34 | apiVersion: v1
35 | kind: Service
36 | metadata:
37 | labels:
38 | run: dingtalk
39 | name: webhook-dingtalk
40 | namespace: monitor
41 | spec:
42 | ports:
43 | - port: 8060
44 | protocol: TCP
45 | targetPort: 8060
46 | selector:
47 | run: dingtalk
48 | sessionAffinity: None
49 |
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/prometheus/etcd-client-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd-client",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "k8s",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
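This CSR is signed with the cluster CA (via cfssl, as in the deploy role) to produce etcd-client.pem / etcd-client-key.pem, which Prometheus uses to scrape etcd over TLS. A quick manual check of that scrape path, with illustrative paths and address:
  cd /etc/kubeasz/clusters/<cluster>/ssl
  curl -s --cacert ca.pem --cert etcd-client.pem --key etcd-client-key.pem \
    https://<etcd-node-ip>:2379/metrics | head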
--------------------------------------------------------------------------------
/roles/cluster-addon/templates/prometheus/example-config-alertsmanager.yaml:
--------------------------------------------------------------------------------
1 | alertmanagerFiles:
2 | alertmanager.yml:
3 | global:
4 | smtp_smarthost: 'smtp.163.com:25'
5 | smtp_from: 'xxxx@163.com'
6 | smtp_auth_username: 'xxxx@163.com'
7 | smtp_auth_password: '*********'
8 | smtp_require_tls: false
9 |
10 | route:
11 | group_by: ['alertname', 'pod_name']
12 | group_wait: 10s
13 | group_interval: 5m
14 | #receiver: AlertMail
15 | receiver: dingtalk
16 | repeat_interval: 3h
17 |
18 | receivers:
19 | - name: 'AlertMail'
20 | email_configs:
21 | - to: 'xxxx@163.com'
22 | - name: dingtalk
23 | webhook_configs:
24 | - send_resolved: false
25 |       # requires the dingtalk-webhook.yaml add-on to be running; see docs/guide/prometheus.md for details
26 |       url: http://webhook-dingtalk.monitor.svc.cluster.local:8060/dingtalk/webhook1/send
27 |
28 |
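The embedded alertmanager.yml can be sanity-checked before it is fed to the chart; a sketch assuming yq v4 and amtool are installed:
  yq '.alertmanagerFiles["alertmanager.yml"]' example-config-alertsmanager.yaml > /tmp/am.yml
  amtool check-config /tmp/am.yml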
--------------------------------------------------------------------------------
/roles/cluster-addon/vars/main.yml:
--------------------------------------------------------------------------------
1 | # default values
2 |
3 | # coredns service address, derived from SERVICE_CIDR; by default the second address of the range
4 | CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR.split('.')[0] }}.{{ SERVICE_CIDR.split('.')[1] }}.{{ SERVICE_CIDR.split('.')[2] }}.{{ SERVICE_CIDR.split('.')[3]|regex_replace('/.*', '')|int + 2 }}"
5 |
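For example, with SERVICE_CIDR set to "10.68.0.0/16" the expression splits the string into 10 / 68 / 0 / "0/16", strips the mask from the last field and adds 2:
  CLUSTER_DNS_SVC_IP = "10.68.0.2"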
--------------------------------------------------------------------------------
/roles/cluster-restore/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # specify the etcd backup to restore; by default the most recent backup is used
2 | # backups are kept on the ansible control host under /etc/kubeasz/clusters/_cluster_name_/backup
3 | db_to_restore: "snapshot.db"
4 |
5 | # etcd peer IPs and ports, generated automatically from the etcd group members
6 | TMP_NODES: "{% for h in groups['etcd'] %}etcd-{{ h }}=https://{{ h }}:2380,{% endfor %}"
7 | ETCD_NODES: "{{ TMP_NODES.rstrip(',') }}"
8 |
--------------------------------------------------------------------------------
/roles/cluster-restore/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Stop the etcd service
2 | service: name=etcd state=stopped
3 |
4 | - name: Remove the etcd data directory
5 | file: name={{ ETCD_DATA_DIR }}/member state=absent
6 |
7 | - name: Clean up data from the previous restore
8 | file: name=/etcd_backup state=absent
9 |
10 | - name: Create the backup directory
11 | file: name=/etcd_backup state=directory
12 |
13 | - name: Prepare the specified etcd backup data
14 | copy:
15 | src: "{{ cluster_dir }}/backup/{{ db_to_restore }}"
16 | dest: "/etcd_backup/snapshot.db"
17 |
18 | - name: Restore the etcd data
19 | shell: "cd /etcd_backup && \
20 | ETCDCTL_API=3 {{ bin_dir }}/etcdctl snapshot restore snapshot.db \
21 | --name etcd-{{ inventory_hostname }} \
22 | --initial-cluster {{ ETCD_NODES }} \
23 | --initial-cluster-token etcd-cluster-0 \
24 | --initial-advertise-peer-urls https://{{ inventory_hostname }}:2380"
25 |
26 | - name: Copy the restored data into the etcd data directory
27 | shell: "cp -rf /etcd_backup/etcd-{{ inventory_hostname }}.etcd/member {{ ETCD_DATA_DIR }}/"
28 |
29 | - name: Restart the etcd service
30 | service: name=etcd state=restarted
31 |
32 | - name: Poll until the etcd service is active
33 | shell: "systemctl is-active etcd.service"
34 | register: etcd_status
35 | until: '"active" in etcd_status.stdout'
36 | retries: 8
37 | delay: 8
38 |
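After the restore finishes on all members, cluster health can be confirmed from any etcd node; a sketch assuming the certificates sit under /etc/kubernetes/ssl as in a default kubeasz setup:
  ETCDCTL_API=3 etcdctl \
    --endpoints=https://<etcd-node-ip>:2379 \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem \
    endpoint health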
--------------------------------------------------------------------------------
/roles/containerd/templates/containerd.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=containerd container runtime
3 | Documentation=https://containerd.io
4 | After=network.target
5 |
6 | [Service]
7 | Environment="PATH={{ bin_dir }}/containerd-bin:/bin:/sbin:/usr/bin:/usr/sbin"
8 | ExecStartPre=-/sbin/modprobe overlay
9 | ExecStart={{ bin_dir }}/containerd-bin/containerd
10 | Restart=always
11 | RestartSec=5
12 | Delegate=yes
13 | KillMode=process
14 | OOMScoreAdjust=-999
15 | LimitNOFILE=1048576
16 | # Having non-zero Limit*s causes performance problems due to accounting overhead
17 | # in the kernel. We recommend using cgroups to do container-local accounting.
18 | LimitNPROC=infinity
19 | LimitCORE=infinity
20 |
21 | [Install]
22 | WantedBy=multi-user.target
23 |
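Once the unit is installed, a quick activation check (crictl reads its endpoint from the crictl.yaml template that follows, typically placed at /etc/crictl.yaml):
  systemctl daemon-reload && systemctl enable --now containerd
  systemctl is-active containerd
  crictl version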
--------------------------------------------------------------------------------
/roles/containerd/templates/crictl.yaml.j2:
--------------------------------------------------------------------------------
1 | runtime-endpoint: unix:///run/containerd/containerd.sock
2 |
--------------------------------------------------------------------------------
/roles/deploy/deploy.yml:
--------------------------------------------------------------------------------
1 | # to create CA, kubeconfig, kube-proxy.kubeconfig etc.
2 | - hosts: localhost
3 | roles:
4 | - deploy
5 |
--------------------------------------------------------------------------------
/roles/deploy/tasks/create-kube-proxy-kubeconfig.yml:
--------------------------------------------------------------------------------
1 | - name: Prepare the kube-proxy certificate signing request
2 | template: src=kube-proxy-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-proxy-csr.json
3 |
4 | - name: Create the kube-proxy certificate and private key
5 | shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
6 | -ca=ca.pem \
7 | -ca-key=ca-key.pem \
8 | -config=ca-config.json \
9 | -profile=kubernetes kube-proxy-csr.json | {{ base_dir }}/bin/cfssljson -bare kube-proxy"
10 |
11 | - name: Set cluster parameters
12 | shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
13 | --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
14 | --embed-certs=true \
15 | --server={{ KUBE_APISERVER }} \
16 | --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
17 |
18 | - name: Set client authentication parameters
19 | shell: "{{ base_dir }}/bin/kubectl config set-credentials kube-proxy \
20 | --client-certificate={{ cluster_dir }}/ssl/kube-proxy.pem \
21 | --client-key={{ cluster_dir }}/ssl/kube-proxy-key.pem \
22 | --embed-certs=true \
23 | --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
24 |
25 | - name: Set context parameters
26 | shell: "{{ base_dir }}/bin/kubectl config set-context default \
27 | --cluster=kubernetes \
28 | --user=kube-proxy \
29 | --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
30 |
31 | - name: Select the default context
32 | shell: "{{ base_dir }}/bin/kubectl config use-context default \
33 | --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
34 |
--------------------------------------------------------------------------------
/roles/deploy/tasks/create-kube-scheduler-kubeconfig.yml:
--------------------------------------------------------------------------------
1 | - name: Prepare the kube-scheduler certificate signing request
2 | template: src=kube-scheduler-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-scheduler-csr.json
3 |
4 | - name: Create the kube-scheduler certificate and private key
5 | shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
6 | -ca=ca.pem \
7 | -ca-key=ca-key.pem \
8 | -config=ca-config.json \
9 | -profile=kubernetes kube-scheduler-csr.json | {{ base_dir }}/bin/cfssljson -bare kube-scheduler"
10 |
11 | - name: Set cluster parameters
12 | shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
13 | --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
14 | --embed-certs=true \
15 | --server={{ KUBE_APISERVER }} \
16 | --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"
17 |
18 | - name: Set authentication parameters
19 | shell: "{{ base_dir }}/bin/kubectl config set-credentials system:kube-scheduler \
20 | --client-certificate={{ cluster_dir }}/ssl/kube-scheduler.pem \
21 | --client-key={{ cluster_dir }}/ssl/kube-scheduler-key.pem \
22 | --embed-certs=true \
23 | --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"
24 |
25 | - name: Set context parameters
26 | shell: "{{ base_dir }}/bin/kubectl config set-context default \
27 | --cluster=kubernetes \
28 | --user=system:kube-scheduler \
29 | --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"
30 |
31 | - name: Select the default context
32 | shell: "{{ base_dir }}/bin/kubectl config use-context default \
33 | --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"
34 |
--------------------------------------------------------------------------------
/roles/deploy/tasks/create-kubectl-kubeconfig.yml:
--------------------------------------------------------------------------------
1 | - name: Prepare the admin certificate signing request used by kubectl
2 | template: src=admin-csr.json.j2 dest={{ cluster_dir }}/ssl/admin-csr.json
3 |
4 | - name: Create the admin certificate and private key
5 | shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
6 | -ca=ca.pem \
7 | -ca-key=ca-key.pem \
8 | -config=ca-config.json \
9 | -profile=kubernetes admin-csr.json | {{ base_dir }}/bin/cfssljson -bare admin"
10 |
11 | - name: Set cluster parameters
12 | shell: "{{ base_dir }}/bin/kubectl config set-cluster {{ CLUSTER_NAME }} \
13 | --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
14 | --embed-certs=true \
15 | --server={{ KUBE_APISERVER }} \
16 | --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
17 |
18 | - name: Set client authentication parameters
19 | shell: "{{ base_dir }}/bin/kubectl config set-credentials admin \
20 | --client-certificate={{ cluster_dir }}/ssl/admin.pem \
21 | --embed-certs=true \
22 | --client-key={{ cluster_dir }}/ssl/admin-key.pem \
23 | --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
24 |
25 | - name: Set context parameters
26 | shell: "{{ base_dir }}/bin/kubectl config set-context {{ CONTEXT_NAME }} \
27 | --cluster={{ CLUSTER_NAME }} --user=admin \
28 | --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
29 |
30 | - name: Select the default context
31 | shell: "{{ base_dir }}/bin/kubectl config use-context {{ CONTEXT_NAME }} \
32 | --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
33 |
34 | - name: Install the kubeconfig
35 | copy: src={{ cluster_dir }}/kubectl.kubeconfig dest=~/.kube/config mode=0400
36 |
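The installed kubeconfig can be verified on the control host once the kube-apiserver is reachable:
  kubectl config get-contexts
  kubectl get nodes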
--------------------------------------------------------------------------------
/roles/deploy/templates/admin-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "admin",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "system:masters",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/deploy/templates/ca-config.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "{{ CERT_EXPIRY }}"
5 | },
6 | "profiles": {
7 | "kubernetes": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth",
12 | "client auth"
13 | ],
14 | "expiry": "{{ CERT_EXPIRY }}"
15 | },
16 | "kcfg": {
17 | "usages": [
18 | "signing",
19 | "key encipherment",
20 | "client auth"
21 | ],
22 | "expiry": "{{ CUSTOM_EXPIRY }}"
23 | }
24 | }
25 | }
26 | }
27 |
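The kubernetes profile signs server+client certificates with CERT_EXPIRY, while kcfg signs client-only certificates with the longer CUSTOM_EXPIRY (used for generated kubeconfigs). A sketch of issuing and inspecting a kcfg certificate from the cluster ssl directory, using the user-csr template rendered by this role:
  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
    -profile=kcfg user-csr.json | cfssljson -bare user
  openssl x509 -noout -enddate -in user.pem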
--------------------------------------------------------------------------------
/roles/deploy/templates/ca-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes-ca",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | },
7 | "names": [
8 | {
9 | "C": "CN",
10 | "ST": "HangZhou",
11 | "L": "XS",
12 | "O": "k8s",
13 | "OU": "System"
14 | }
15 | ],
16 | "ca": {
17 | "expiry": "{{ CA_EXPIRY }}"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/roles/deploy/templates/crb.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: crb-{{ USER_NAME }}
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | {% if USER_TYPE == 'admin' %}
9 | name: cluster-admin
10 | {% else %}
11 | name: view
12 | {% endif %}
13 | subjects:
14 | - kind: User
15 | name: {{ USER_NAME }}
16 | apiGroup: rbac.authorization.k8s.io
17 |
--------------------------------------------------------------------------------
/roles/deploy/templates/kube-controller-manager-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-controller-manager",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "system:kube-controller-manager",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/deploy/templates/kube-proxy-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-proxy",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "k8s",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/deploy/templates/kube-scheduler-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-scheduler",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "system:kube-scheduler",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/deploy/templates/user-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "{{ USER_NAME }}",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "k8s",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/deploy/vars/main.yml:
--------------------------------------------------------------------------------
1 | # apiserver defaults to the first master node
2 | KUBE_APISERVER: "https://{{ groups['kube_master'][0] }}:{{ SECURE_PORT }}"
3 |
4 | #
5 | ADD_KCFG: false
6 | CUSTOM_EXPIRY: "438000h"
7 |
--------------------------------------------------------------------------------
/roles/docker/templates/daemon.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "data-root": "{{ DOCKER_STORAGE_DIR }}",
3 | "exec-opts": ["native.cgroupdriver={{ CGROUP_DRIVER }}"],
4 | {% if ENABLE_MIRROR_REGISTRY %}
5 | "registry-mirrors": [
6 | "https://docker.nju.edu.cn/",
7 | "https://kuamavit.mirror.aliyuncs.com"
8 | ],
9 | {% endif %}
10 | {% if DOCKER_ENABLE_REMOTE_API %}
11 | "hosts": ["tcp://0.0.0.0:2376", "unix:///var/run/docker.sock"],
12 | {% endif %}
13 | "insecure-registries": [{{ INSECURE_REG_STRING }}],
14 | "max-concurrent-downloads": 10,
15 | "live-restore": true,
16 | "log-driver": "json-file",
17 | "log-level": "warn",
18 | "log-opts": {
19 | "max-size": "50m",
20 | "max-file": "1"
21 | },
22 | "storage-driver": "overlay2"
23 | }
24 |
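After restarting dockerd, the rendered options can be confirmed from docker info; the expected cgroup driver follows the rule in roles/docker/vars/main.yml:
  systemctl restart docker
  docker info --format '{{.CgroupDriver}} {{.Driver}} {{.DockerRootDir}}'
  # expect: systemd overlay2 <DOCKER_STORAGE_DIR>   (cgroupfs on Docker < 20.10)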
--------------------------------------------------------------------------------
/roles/docker/templates/docker.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Docker Application Container Engine
3 | Documentation=http://docs.docker.io
4 |
5 | [Service]
6 | Environment="PATH={{ bin_dir }}:/bin:/sbin:/usr/bin:/usr/sbin"
7 | ExecStart={{ bin_dir }}/dockerd
8 | ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
9 | ExecReload=/bin/kill -s HUP $MAINPID
10 | Restart=on-failure
11 | RestartSec=5
12 | LimitNOFILE=infinity
13 | LimitNPROC=infinity
14 | LimitCORE=infinity
15 | Delegate=yes
16 | KillMode=process
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
--------------------------------------------------------------------------------
/roles/docker/vars/main.yml:
--------------------------------------------------------------------------------
1 | # cgroup driver
2 | CGROUP_DRIVER: "{%- if DOCKER_VER|float >= 20.10 -%} \
3 | systemd \
4 | {%- else -%} \
5 | cgroupfs \
6 | {%- endif -%}"
7 |
8 | #
9 | INSECURE_REG_STR: "{% for reg in INSECURE_REG %}\"{{ reg }}\",{% endfor %}"
10 | INSECURE_REG_STRING: "{{ INSECURE_REG_STR.rstrip(',') }}"
11 |
--------------------------------------------------------------------------------
/roles/etcd/clean-etcd.yml:
--------------------------------------------------------------------------------
1 | # WARNING: clean 'etcd' nodes service & data
2 | - hosts:
3 | - etcd
4 | tasks:
5 | - name: stop and disable etcd service
6 | service:
7 | name: etcd
8 | state: stopped
9 | enabled: no
10 | ignore_errors: true
11 |
12 | - name: remove files and dirs
13 | file: name={{ item }} state=absent
14 | with_items:
15 |     - "{{ ETCD_DATA_DIR }}"
16 |     - "{{ ETCD_WAL_DIR }}"
17 | - "/backup/k8s"
18 | - "/etc/systemd/system/etcd.service"
19 |
--------------------------------------------------------------------------------
/roles/etcd/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # etcd peer IPs and ports, generated automatically from the etcd group members
2 | TMP_NODES: "{% for h in groups['etcd'] %}etcd-{{ h }}=https://{{ h }}:2380,{% endfor %}"
3 | ETCD_NODES: "{{ TMP_NODES.rstrip(',') }}"
4 |
5 | # initial etcd cluster state: new/existing
6 | CLUSTER_STATE: "new"
7 |
--------------------------------------------------------------------------------
/roles/etcd/templates/etcd-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "hosts": [
4 | {% for host in groups['etcd'] %}
5 | "{{ host }}",
6 | {% endfor %}
7 | "127.0.0.1"
8 | ],
9 | "key": {
10 | "algo": "rsa",
11 | "size": 2048
12 | },
13 | "names": [
14 | {
15 | "C": "CN",
16 | "ST": "HangZhou",
17 | "L": "XS",
18 | "O": "k8s",
19 | "OU": "System"
20 | }
21 | ]
22 | }
23 |
--------------------------------------------------------------------------------
/roles/etcd/templates/etcd.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Etcd Server
3 | After=network.target
4 | After=network-online.target
5 | Wants=network-online.target
6 | Documentation=https://github.com/coreos
7 |
8 | [Service]
9 | Type=notify
10 | WorkingDirectory={{ ETCD_DATA_DIR }}
11 | ExecStart={{ bin_dir }}/etcd \
12 | --name=etcd-{{ inventory_hostname }} \
13 | --cert-file={{ ca_dir }}/etcd.pem \
14 | --key-file={{ ca_dir }}/etcd-key.pem \
15 | --peer-cert-file={{ ca_dir }}/etcd.pem \
16 | --peer-key-file={{ ca_dir }}/etcd-key.pem \
17 | --trusted-ca-file={{ ca_dir }}/ca.pem \
18 | --peer-trusted-ca-file={{ ca_dir }}/ca.pem \
19 | --initial-advertise-peer-urls=https://{{ inventory_hostname }}:2380 \
20 | --listen-peer-urls=https://{{ inventory_hostname }}:2380 \
21 | --listen-client-urls=https://{{ inventory_hostname }}:2379,http://127.0.0.1:2379 \
22 | --advertise-client-urls=https://{{ inventory_hostname }}:2379 \
23 | --initial-cluster-token=etcd-cluster-0 \
24 | --initial-cluster={{ ETCD_NODES }} \
25 | --initial-cluster-state={{ CLUSTER_STATE }} \
26 | --data-dir={{ ETCD_DATA_DIR }} \
27 | --wal-dir={{ ETCD_WAL_DIR }} \
28 | --snapshot-count=50000 \
29 | --auto-compaction-retention=1 \
30 | --auto-compaction-mode=periodic \
31 | --max-request-bytes=10485760 \
32 | --quota-backend-bytes=8589934592
33 | Restart=always
34 | RestartSec=15
35 | LimitNOFILE=65536
36 | OOMScoreAdjust=-999
37 |
38 | [Install]
39 | WantedBy=multi-user.target
40 |
--------------------------------------------------------------------------------
/roles/ex-lb/clean-ex-lb.yml:
--------------------------------------------------------------------------------
1 | - hosts:
2 |   - ex_lb
3 | tasks:
4 | - name: get service info
5 | shell: 'systemctl list-units --type=service |grep -E "l4lb|keepalived|ssh"'
6 | register: service_info
7 |
8 | - name: remove service l4lb
9 | service: name=l4lb state=stopped enabled=no
10 | when: '"l4lb" in service_info.stdout'
11 | ignore_errors: true
12 |
13 | - name: remove service keepalived
14 | service: name=keepalived state=stopped enabled=no
15 | when: '"keepalived" in service_info.stdout'
16 | ignore_errors: true
17 |
18 | - name: remove files and dirs
19 | file: name={{ item }} state=absent
20 | with_items:
21 | - "/etc/l4lb"
22 | - "/etc/keepalived"
23 | - "/etc/systemd/system/l4lb.service"
24 | - "/etc/systemd/system/keepalived.service"
25 | - "/usr/local/sbin/keepalived"
26 |
--------------------------------------------------------------------------------
/roles/ex-lb/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # VRRP router id to distinguish multiple instances; must not be reused in the same subnet, value range 0-255
2 | # since the project already sends VRRP packets in unicast mode, a duplicate ROUTER_ID in the same subnet does no harm
3 | ROUTER_ID: 222
4 |
5 | # enable load balancing for the ingress NodePort service (yes/no)
6 | INGRESS_NODEPORT_LB: "no"
7 |
8 | # ingress NodePort port number
9 | INGRESS_NODEPORT_LB_PORT: 23456
10 |
11 | # enable load balancing for the ingress TLS NodePort service (yes/no)
12 | INGRESS_TLS_NODEPORT_LB: "no"
13 |
14 | # ingress TLS NodePort port number
15 | INGRESS_TLS_NODEPORT_LB_PORT: 23457
16 |
--------------------------------------------------------------------------------
/roles/ex-lb/ex-lb.yml:
--------------------------------------------------------------------------------
1 | - hosts: ex_lb
2 | roles:
3 | - ex-lb
4 |
--------------------------------------------------------------------------------
/roles/ex-lb/templates/keepalived-backup.conf.j2:
--------------------------------------------------------------------------------
1 | global_defs {
2 | }
3 |
4 | vrrp_track_process check-l4lb {
5 | process l4lb
6 | weight -60
7 | delay 3
8 | }
9 |
10 | vrrp_instance VI-01 {
11 | state BACKUP
12 | priority {{ 119 | random(61, 1) }}
13 | unicast_src_ip {{ inventory_hostname }}
14 | unicast_peer {
15 | {% for h in groups['ex_lb'] %}{% if h != inventory_hostname %}
16 | {{ h }}
17 | {% endif %}{% endfor %}
18 | }
19 | dont_track_primary
20 | interface {{ LB_IF }}
21 | virtual_router_id {{ ROUTER_ID }}
22 | advert_int 3
23 | track_process {
24 | check-l4lb
25 | }
26 | virtual_ipaddress {
27 | {{ EX_APISERVER_VIP }}
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/roles/ex-lb/templates/keepalived-master.conf.j2:
--------------------------------------------------------------------------------
1 | global_defs {
2 | }
3 |
4 | vrrp_track_process check-l4lb {
5 | process l4lb
6 | weight -60
7 | delay 3
8 | }
9 |
10 | vrrp_instance VI-01 {
11 | state MASTER
12 | priority 120
13 | unicast_src_ip {{ inventory_hostname }}
14 | unicast_peer {
15 | {% for h in groups['ex_lb'] %}{% if h != inventory_hostname %}
16 | {{ h }}
17 | {% endif %}{% endfor %}
18 | }
19 | dont_track_primary
20 | interface {{ LB_IF }}
21 | virtual_router_id {{ ROUTER_ID }}
22 | advert_int 3
23 | track_process {
24 | check-l4lb
25 | }
26 | virtual_ipaddress {
27 | {{ EX_APISERVER_VIP }}
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/roles/ex-lb/templates/keepalived.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=VRRP High Availability Monitor
3 | After=network-online.target syslog.target
4 | Wants=network-online.target
5 | Documentation=https://keepalived.org/manpage.html
6 |
7 | [Service]
8 | Type=forking
9 | KillMode=process
10 | ExecStart=/usr/local/sbin/keepalived -D -f /etc/keepalived/keepalived.conf
11 | ExecReload=/bin/kill -HUP $MAINPID
12 |
13 | [Install]
14 | WantedBy=multi-user.target
15 |
--------------------------------------------------------------------------------
/roles/ex-lb/templates/l4lb.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=l4 nginx proxy
3 | After=network.target
4 | After=network-online.target
5 | Wants=network-online.target
6 |
7 | [Service]
8 | Type=forking
9 | ExecStartPre=/etc/l4lb/sbin/l4lb -c /etc/l4lb/conf/l4lb.conf -p /etc/l4lb -t
10 | ExecStart=/etc/l4lb/sbin/l4lb -c /etc/l4lb/conf/l4lb.conf -p /etc/l4lb
11 | ExecReload=/etc/l4lb/sbin/l4lb -c /etc/l4lb/conf/l4lb.conf -p /etc/l4lb -s reload
12 | PrivateTmp=true
13 | Restart=always
14 | RestartSec=15
15 | StartLimitInterval=0
16 | LimitNOFILE=65536
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
--------------------------------------------------------------------------------
/roles/flannel/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Render the flannel DaemonSet yaml file
2 | template: src=kube-flannel.yaml.j2 dest={{ cluster_dir }}/yml/flannel.yaml
3 | run_once: true
4 | connection: local
5 |
6 | - name: Remove the flannel network
7 | shell: "{{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/flannel.yaml || echo true; sleep 3"
8 | run_once: true
9 | connection: local
10 | tags: force_change_certs
11 | when: 'CHANGE_CA|bool'
12 |
13 | - name: Deploy the flannel network
14 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/flannel.yaml"
15 | run_once: true
16 | connection: local
17 | tags: force_change_certs
18 |
19 | - name: Remove the default cni config
20 | file: path=/etc/cni/net.d/10-default.conf state=absent
21 |
22 | - name: Poll until flannel is Running (time depends on image download speed)
23 | shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
24 | register: pod_status
25 | until: pod_status.stdout == "Running"
26 | retries: 15
27 | delay: 8
28 | ignore_errors: true
29 | connection: local
30 | tags: force_change_certs
31 |
--------------------------------------------------------------------------------
/roles/harbor/templates/harbor-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "harbor",
3 | "hosts": [
4 | "127.0.0.1",
5 | "{{ inventory_hostname }}",
6 | "{{ HARBOR_DOMAIN }}"
7 | ],
8 | "key": {
9 | "algo": "rsa",
10 | "size": 2048
11 | },
12 | "names": [
13 | {
14 | "C": "CN",
15 | "ST": "HangZhou",
16 | "L": "XS",
17 | "O": "k8s",
18 | "OU": "System"
19 | }
20 | ]
21 | }
22 |
--------------------------------------------------------------------------------
/roles/harbor/vars/main.yml:
--------------------------------------------------------------------------------
1 | # harbor major.minor version
2 | # extracted from the full version number
3 | HARBOR_VER_MAIN: "{{ HARBOR_VER.split('.')[0] }}.{{ HARBOR_VER.split('.')[1] }}"
4 |
5 | # HARBOR_HOSTNAME value
6 | HARBOR_HOSTNAME: "{% if HARBOR_DOMAIN != '' %}{{ HARBOR_DOMAIN }}{% else %}{{ inventory_hostname }}{% endif %}"
7 |
8 | # harbor default install options
9 | HARBOR_INST_OPS: ""
10 |
--------------------------------------------------------------------------------
/roles/kube-lb/clean-kube-lb.yml:
--------------------------------------------------------------------------------
1 | - hosts:
2 |   - kube_master
3 |   - kube_node
4 | tasks:
5 | - name: stop and disable kube-lb service
6 | service:
7 | name: kube-lb
8 | state: stopped
9 | enabled: no
10 | ignore_errors: true
11 |
12 | - name: remove files and dirs
13 | file: name={{ item }} state=absent
14 | with_items:
15 | - "/etc/kube-lb"
16 | - "/etc/systemd/system/kube-lb.service"
17 |
--------------------------------------------------------------------------------
/roles/kube-lb/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: prepare some dirs
2 | file: name={{ item }} state=directory
3 | with_items:
4 | - "/etc/kube-lb/sbin"
5 | - "/etc/kube-lb/logs"
6 | - "/etc/kube-lb/conf"
7 |
8 | - name: Copy the kube-lb (nginx) binary
9 | copy: src={{ base_dir }}/bin/nginx dest=/etc/kube-lb/sbin/kube-lb mode=0755
10 |
11 | - name: Create the kube-lb configuration file
12 | template: src=kube-lb.conf.j2 dest=/etc/kube-lb/conf/kube-lb.conf
13 | tags: restart_kube-lb
14 |
15 | - name: Create the kube-lb systemd unit file
16 | template: src=kube-lb.service.j2 dest=/etc/systemd/system/kube-lb.service
17 | tags: restart_kube-lb
18 |
19 | - name: Enable the kube-lb service at boot
20 | shell: systemctl enable kube-lb
21 | ignore_errors: true
22 |
23 | - name: Start the kube-lb service
24 | shell: systemctl daemon-reload && systemctl restart kube-lb
25 | ignore_errors: true
26 | tags: restart_kube-lb
27 |
28 | - name: Poll until the kube-lb service is active
29 | shell: "systemctl is-active kube-lb.service"
30 | register: svc_status
31 | until: '"active" in svc_status.stdout'
32 | retries: 3
33 | delay: 3
34 | tags: restart_kube-lb
35 |
--------------------------------------------------------------------------------
/roles/kube-lb/templates/kube-lb.conf.j2:
--------------------------------------------------------------------------------
1 | user root;
2 | worker_processes 1;
3 |
4 | error_log /etc/kube-lb/logs/error.log warn;
5 |
6 | events {
7 | worker_connections 3000;
8 | }
9 |
10 | stream {
11 | upstream backend {
12 | {% for host in groups['kube_master'] %}
13 | server {{ host }}:{{ SECURE_PORT }} max_fails=2 fail_timeout=3s;
14 | {% endfor %}
15 | }
16 |
17 | server {
18 | listen 127.0.0.1:{{ SECURE_PORT }};
19 | proxy_connect_timeout 1s;
20 | proxy_pass backend;
21 | }
22 | }
23 |
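The same self-test that the unit file below runs before start can be used after editing this config, followed by a reload:
  /etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb -t
  systemctl reload kube-lb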
--------------------------------------------------------------------------------
/roles/kube-lb/templates/kube-lb.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=l4 nginx proxy for kube-apiservers
3 | After=network.target
4 | After=network-online.target
5 | Wants=network-online.target
6 |
7 | [Service]
8 | Type=forking
9 | ExecStartPre=/etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb -t
10 | ExecStart=/etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb
11 | ExecReload=/etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb -s reload
12 | PrivateTmp=true
13 | Restart=always
14 | RestartSec=15
15 | StartLimitInterval=0
16 | LimitNOFILE=65536
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
--------------------------------------------------------------------------------
/roles/kube-master/templates/aggregator-proxy-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "aggregator",
3 | "hosts": [],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "CN",
11 | "ST": "HangZhou",
12 | "L": "XS",
13 | "O": "k8s",
14 | "OU": "System"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/roles/kube-master/templates/kube-controller-manager.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Controller Manager
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 |
5 | [Service]
6 | ExecStart={{ bin_dir }}/kube-controller-manager \
7 | --allocate-node-cidrs=true \
8 | --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
9 | --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
10 | --bind-address=0.0.0.0 \
11 | --cluster-cidr={{ CLUSTER_CIDR }} \
12 | --cluster-name=kubernetes \
13 | --cluster-signing-cert-file={{ ca_dir }}/ca.pem \
14 | --cluster-signing-key-file={{ ca_dir }}/ca-key.pem \
15 | --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
16 | --leader-elect=true \
17 | --node-cidr-mask-size={{ NODE_CIDR_LEN }} \
18 | --root-ca-file={{ ca_dir }}/ca.pem \
19 | --service-account-private-key-file={{ ca_dir }}/ca-key.pem \
20 | --service-cluster-ip-range={{ SERVICE_CIDR }} \
21 | --use-service-account-credentials=true \
22 | --v=2
23 | Restart=always
24 | RestartSec=5
25 |
26 | [Install]
27 | WantedBy=multi-user.target
28 |
--------------------------------------------------------------------------------
/roles/kube-master/templates/kube-scheduler.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Scheduler
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 |
5 | [Service]
6 | ExecStart={{ bin_dir }}/kube-scheduler \
7 | --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
8 | --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
9 | --bind-address=0.0.0.0 \
10 | --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
11 | --leader-elect=true \
12 | --v=2
13 | Restart=always
14 | RestartSec=5
15 |
16 | [Install]
17 | WantedBy=multi-user.target
18 |
--------------------------------------------------------------------------------
/roles/kube-master/templates/kubernetes-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "hosts": [
4 | "127.0.0.1",
5 | {% if groups['ex_lb']|length > 0 %}
6 | "{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}",
7 | {% endif %}
8 | {% for host in groups['kube_master'] %}
9 | "{{ host }}",
10 | {% endfor %}
11 | "{{ CLUSTER_KUBERNETES_SVC_IP }}",
12 | {% for host in MASTER_CERT_HOSTS %}
13 | "{{ host }}",
14 | {% endfor %}
15 | "kubernetes",
16 | "kubernetes.default",
17 | "kubernetes.default.svc",
18 | "kubernetes.default.svc.cluster",
19 | "kubernetes.default.svc.cluster.local"
20 | ],
21 | "key": {
22 | "algo": "rsa",
23 | "size": 2048
24 | },
25 | "names": [
26 | {
27 | "C": "CN",
28 | "ST": "HangZhou",
29 | "L": "XS",
30 | "O": "k8s",
31 | "OU": "System"
32 | }
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/roles/kube-master/vars/main.yml:
--------------------------------------------------------------------------------
1 | # etcd cluster client endpoint list, generated automatically from the etcd group members
2 | TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
3 | ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
4 |
5 | # kubernetes.default.svc address: the first address of the SERVICE_CIDR range
6 | CLUSTER_KUBERNETES_SVC_IP: "{{ SERVICE_CIDR.split('.')[0] }}.{{ SERVICE_CIDR.split('.')[1] }}.{{ SERVICE_CIDR.split('.')[2] }}.{{ SERVICE_CIDR.split('.')[3]|regex_replace('/.*', '')|int + 1 }}"
7 |
--------------------------------------------------------------------------------
/roles/kube-node/templates/cni-default.conf.j2:
--------------------------------------------------------------------------------
1 | {
2 | "name": "mynet",
3 | "cniVersion": "0.3.1",
4 | "type": "bridge",
5 | "bridge": "mynet0",
6 | "isDefaultGateway": true,
7 | "ipMasq": true,
8 | "hairpinMode": true,
9 | "ipam": {
10 | "type": "host-local",
11 | "subnet": "{{ CLUSTER_CIDR }}"
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/roles/kube-node/templates/kube-proxy-config.yaml.j2:
--------------------------------------------------------------------------------
1 | kind: KubeProxyConfiguration
2 | apiVersion: kubeproxy.config.k8s.io/v1alpha1
3 | bindAddress: 0.0.0.0
4 | clientConnection:
5 | kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
6 | # clusterCIDR is used to tell cluster-internal traffic from external traffic; once set, kube-proxy SNATs external requests that access a Service IP
7 | clusterCIDR: "{{ CLUSTER_CIDR }}"
8 | conntrack:
9 | maxPerCore: 32768
10 | min: 131072
11 | tcpCloseWaitTimeout: 1h0m0s
12 | tcpEstablishedTimeout: 24h0m0s
13 | healthzBindAddress: 0.0.0.0:10256
14 | # hostnameOverride must match the kubelet's setting, otherwise kube-proxy will not find this Node after startup and will not create any iptables rules
15 | hostnameOverride: "{{ K8S_NODENAME }}"
16 | metricsBindAddress: 0.0.0.0:10249
17 | mode: "{{ PROXY_MODE }}"
18 | {% if PROXY_MODE == "ipvs" %}
19 | ipvs:
20 | excludeCIDRs: null
21 | minSyncPeriod: 0s
22 | scheduler: ""
23 | strictARP: {{ ENABLE_IPVS_STRICT_ARP }}
24 | syncPeriod: 30s
25 | tcpFinTimeout: 0s
26 | tcpTimeout: 0s
27 | udpTimeout: 0s
28 | {% endif %}
29 |
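A quick way to confirm which mode a node's kube-proxy actually came up in (the proxyMode debug endpoint is served on the metrics port on most versions; fall back to the kube-proxy logs if it is absent):
  curl -s 127.0.0.1:10249/proxyMode; echo
  ipvsadm -Ln | head          # virtual servers are listed only in ipvs mode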
--------------------------------------------------------------------------------
/roles/kube-node/templates/kube-proxy.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Kube-Proxy Server
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | After=network.target
5 |
6 | [Service]
7 | WorkingDirectory=/var/lib/kube-proxy
8 | ExecStart={{ bin_dir }}/kube-proxy \
9 | --config=/var/lib/kube-proxy/kube-proxy-config.yaml
10 | Restart=always
11 | RestartSec=5
12 | LimitNOFILE=65536
13 |
14 | [Install]
15 | WantedBy=multi-user.target
16 |
--------------------------------------------------------------------------------
/roles/kube-node/templates/kubelet-csr.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:node:{{ K8S_NODENAME }}",
3 | "hosts": [
4 | "127.0.0.1",
5 | "{{ inventory_hostname }}",
6 | "{{ K8S_NODENAME }}"
7 | ],
8 | "key": {
9 | "algo": "rsa",
10 | "size": 2048
11 | },
12 | "names": [
13 | {
14 | "C": "CN",
15 | "ST": "HangZhou",
16 | "L": "XS",
17 | "O": "system:nodes",
18 | "OU": "System"
19 | }
20 | ]
21 | }
22 |
--------------------------------------------------------------------------------
/roles/kube-node/vars/main.yml:
--------------------------------------------------------------------------------
1 | # APISERVER address: use the kube-lb load balancer's local listen address
2 | KUBE_APISERVER: "https://127.0.0.1:{{ SECURE_PORT }}"
3 |
4 | # cgroup driver
5 | CGROUP_DRIVER: "systemd"
6 |
7 | # coredns service address, derived from SERVICE_CIDR; by default the second address of the range
8 | CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR.split('.')[0] }}.{{ SERVICE_CIDR.split('.')[1] }}.{{ SERVICE_CIDR.split('.')[2] }}.{{ SERVICE_CIDR.split('.')[3]|regex_replace('/.*', '')|int + 2 }}"
9 |
10 | # pod-max-pids
11 | POD_MAX_PIDS: -1
12 |
13 | # Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2
14 | ENABLE_IPVS_STRICT_ARP: false
15 |
--------------------------------------------------------------------------------
/roles/kube-ovn/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # remove any existing cni config
2 | - name: Remove the default cni config
3 | file: path=/etc/cni/net.d/10-default.conf state=absent
4 |
5 | - block:
6 |   - name: Prepare installation files
7 | template: src={{ item }}.j2 dest={{ cluster_dir }}/yml/{{ item }}
8 | with_items:
9 | - "install.sh"
10 | - "coredns.yaml"
11 |
12 |   - name: Prepare the dnscache deployment manifest
13 | template: src=nodelocaldns-ipvs.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
14 | when: "PROXY_MODE == 'ipvs'"
15 |
16 |   - name: Prepare the dnscache deployment manifest
17 | template: src=nodelocaldns-iptables.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
18 | when: "PROXY_MODE == 'iptables'"
19 |
20 |   - name: Deploy coredns and dnscache
21 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/coredns.yaml && \
22 | {{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nodelocaldns.yaml"
23 |
24 |   - name: Install the kube-ovn network
25 | shell: 'export PATH="{{ base_dir }}/bin/:$PATH"; cd {{ cluster_dir }}/yml/ && \
26 | bash install.sh >> /tmp/install-kube-ovn-`date +"%Y%m%d%H%M%S"`.log 2>&1'
27 | run_once: true
28 | ignore_errors: true
29 | connection: local
30 |
31 | # wait for the network plugin to be deployed
32 | - name: Poll until kube-ovn is Running (time depends on image download speed)
33 | shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'kube-ovn-cni'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
34 | register: pod_status
35 | until: pod_status.stdout == "Running"
36 | retries: 15
37 | delay: 8
38 | ignore_errors: true
39 | connection: local
40 | tags: force_change_certs
41 |
--------------------------------------------------------------------------------
/roles/kube-ovn/vars/main.yml:
--------------------------------------------------------------------------------
1 | # CLUSTER_CIDR_GW is used as POD_GATEWAY, the first address of the CLUSTER_CIDR range
2 | CLUSTER_CIDR_GW: "{{ CLUSTER_CIDR.split('.')[0] }}.{{ CLUSTER_CIDR.split('.')[1] }}.{{ CLUSTER_CIDR.split('.')[2] }}.{{ CLUSTER_CIDR.split('.')[3]|regex_replace('/.*', '')|int + 1 }}"
3 |
4 | # coredns service address, derived from SERVICE_CIDR; by default the second address of the range
5 | CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR.split('.')[0] }}.{{ SERVICE_CIDR.split('.')[1] }}.{{ SERVICE_CIDR.split('.')[2] }}.{{ SERVICE_CIDR.split('.')[3]|regex_replace('/.*', '')|int + 2 }}"
6 |
--------------------------------------------------------------------------------
/roles/kube-router/kube-router.yml:
--------------------------------------------------------------------------------
1 | - hosts:
2 | - kube_master
3 | - kube_node
4 | roles:
5 | - kube-router
6 |
--------------------------------------------------------------------------------
/roles/kube-router/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - block:
2 |   - name: Prepare the kube-router DaemonSet manifest
3 | template: src=kuberouter.yaml.j2 dest={{ cluster_dir }}/yml/kube-router.yaml
4 |
5 |   - name: Remove the kube-router DaemonSet
6 | shell: "{{ base_dir }}/bin/kubectl delete -f {{ cluster_dir }}/yml/kube-router.yaml || echo true; sleep 3"
7 | when: 'CHANGE_CA|bool'
8 |
9 | # only needs to run once, on a single node
10 |   - name: Deploy the kube-router DaemonSet
11 | shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/kube-router.yaml"
12 | run_once: true
13 | connection: local
14 | tags: force_change_certs
15 |
16 | # remove any existing cni config
17 |   - name: Remove the default cni config
18 | file: path=/etc/cni/net.d/10-default.conf state=absent
19 |
20 | # wait for the network plugin to deploy; time depends on image download speed
21 |   - name: Poll until kube-router is Running (time depends on image download speed)
22 | shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'kube-router'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
23 | register: pod_status
24 | until: pod_status.stdout == "Running"
25 | retries: 15
26 | delay: 8
27 | ignore_errors: true
28 | connection: local
29 | tags: force_change_certs
30 |
--------------------------------------------------------------------------------
/roles/os-harden/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: update-initramfs
3 | command: 'update-initramfs -u'
4 |
--------------------------------------------------------------------------------
/roles/os-harden/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: "Sebastian Gumprich"
4 |   description: 'This Ansible role provides numerous security-related configurations, providing all-round base protection.'
5 | company: Hardening Framework Team
6 | license: Apache License 2.0
7 | min_ansible_version: '2.5'
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 7
12 | - 8
13 | - name: Ubuntu
14 | versions:
15 | - xenial
16 | - bionic
17 | - name: Debian
18 | versions:
19 | - stretch
20 | - buster
21 | - name: Amazon
22 | - name: Fedora
23 | - name: Archlinux
24 | - name: SmartOS
25 | - name: openSUSE
26 | galaxy_tags:
27 | - system
28 | - security
29 | - hardening
30 | dependencies: []
31 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/apt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: remove deprecated or insecure packages | package-01 - package-09
3 | apt:
4 | name: '{{ os_security_packages_list }}'
5 | state: 'absent'
6 | purge: 'yes'
7 | when: os_security_packages_clean | bool
8 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/auditd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install auditd package | package-08
3 | package:
4 | name: '{{ auditd_package }}'
5 | state: 'present'
6 |
7 | - name: configure auditd | package-08
8 | template:
9 | src: 'etc/audit/auditd.conf.j2'
10 | dest: '/etc/audit/auditd.conf'
11 | owner: 'root'
12 | group: 'root'
13 | mode: '0640'
14 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/limits.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - block:
3 | - name: create limits.d-directory if it does not exist | sysctl-31a, sysctl-31b
4 | file:
5 | path: '/etc/security/limits.d'
6 | owner: 'root'
7 | group: 'root'
8 | mode: '0755'
9 | state: 'directory'
10 |
11 | - name: create additional limits config file -> 10.hardcore.conf | sysctl-31a, sysctl-31b
12 | pam_limits:
13 | dest: '/etc/security/limits.d/10.hardcore.conf'
14 | domain: '*'
15 | limit_type: hard
16 | limit_item: core
17 | value: '0'
18 | comment: Prevent core dumps for all users. These are usually not needed and may contain sensitive information
19 |
20 | - name: set 10.hardcore.conf perms to 0400 and root ownership
21 | file:
22 | path: /etc/security/limits.d/10.hardcore.conf
23 | owner: 'root'
24 | group: 'root'
25 | mode: '0440'
26 | state: touch
27 | modification_time: preserve
28 | access_time: preserve
29 |
30 | when: not os_security_kernel_enable_core_dump | bool
31 |
32 | - name: remove 10.hardcore.conf config file
33 | file:
34 | path: /etc/security/limits.d/10.hardcore.conf
35 | state: absent
36 | when: os_security_kernel_enable_core_dump | bool
37 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/login_defs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create login.defs | os-05, os-05b
3 | template:
4 | src: 'etc/login.defs.j2'
5 | dest: '/etc/login.defs'
6 | owner: 'root'
7 | group: 'root'
8 | mode: '0444'
9 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_tasks: hardening.yml
3 | ignore_errors: true
4 | when: os_hardening_enabled | bool
5 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/modprobe.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install modprobe to disable filesystems | os-10
3 | package:
4 | name: '{{ modprobe_package }}'
5 | state: 'present'
6 |
7 | - name: check if efi is installed
8 | stat:
9 | path: "/sys/firmware/efi"
10 | register: efi_installed
11 |
12 | - name: remove vfat from fs-list if efi is used
13 | set_fact:
14 | os_unused_filesystems: "{{ os_unused_filesystems | difference('vfat') }}"
15 | when:
16 | - efi_installed.stat.isdir is defined
17 | - efi_installed.stat.isdir
18 |
19 | - name: remove used filesystems from fs-list
20 | set_fact:
21 | os_unused_filesystems: "{{ os_unused_filesystems | difference(ansible_mounts | map(attribute='fstype') | list) }}"
22 | # we cannot do this on el6 and below, because these systems don't support the map function
23 | when: not ((ansible_facts.os_family in ['Oracle Linux', 'RedHat']) and ansible_facts.distribution_major_version < '7')
24 |
25 | - name: disable unused filesystems | os-10
26 | template:
27 | src: 'etc/modprobe.d/modprobe.j2'
28 | dest: '/etc/modprobe.d/dev-sec.conf'
29 | owner: 'root'
30 | group: 'root'
31 | mode: '0644'
32 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/profile.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add pinerolo_profile.sh to profile.d
3 | template:
4 | src: 'etc/profile.d/profile.conf.j2'
5 | dest: '/etc/profile.d/pinerolo_profile.sh'
6 | owner: 'root'
7 | group: 'root'
8 | mode: '0750'
9 | when: not os_security_kernel_enable_core_dump | bool
10 |
11 | - name: remove pinerolo_profile.sh from profile.d
12 | file:
13 | path: /etc/profile.d/pinerolo_profile.sh
14 | state: absent
15 | when: os_security_kernel_enable_core_dump | bool
16 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/rhosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get user accounts | os-09
3 | command: "awk -F: '{print $1}' /etc/passwd"
4 | changed_when: false
5 | check_mode: false
6 | register: users_accounts
7 |
8 | - name: delete rhosts-files from system | os-09
9 | file:
10 | dest: '~{{ item }}/.rhosts'
11 | state: 'absent'
12 | with_flattened: '{{ users_accounts.stdout_lines | default([]) }}'
13 |
14 | - name: delete hosts.equiv from system | os-01
15 | file:
16 | dest: '/etc/hosts.equiv'
17 | state: 'absent'
18 |
19 | - name: delete .netrc-files from system | os-09
20 | file:
21 | dest: '~{{ item }}/.netrc'
22 | state: 'absent'
23 | with_flattened: '{{ users_accounts.stdout_lines | default([]) }}'
24 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/securetty.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create securetty
3 | template:
4 | src: 'etc/securetty.j2'
5 | dest: '/etc/securetty'
6 | owner: 'root'
7 | group: 'root'
8 | mode: '0400'
9 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/selinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: configure selinux | selinux-01
3 | selinux:
4 | policy: "{{ os_selinux_policy }}"
5 | state: "{{ os_selinux_state }}"
6 |
--------------------------------------------------------------------------------
/roles/os-harden/tasks/suid_sgid.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: remove suid/sgid bit from binaries in blacklist | os-06
3 | file:
4 | path: '{{ item }}'
5 | mode: 'a-s'
6 | state: 'file'
7 | follow: 'yes'
8 | failed_when: false
9 | with_flattened:
10 | - '{{ os_security_suid_sgid_system_blacklist }}'
11 | - '{{ os_security_suid_sgid_blacklist }}'
12 |
13 | - name: find binaries with suid/sgid set | os-06
14 | shell: find / -xdev \( -perm -4000 -o -perm -2000 \) -type f ! -path '/proc/*' -print 2>/dev/null
15 | register: sbit_binaries
16 | when: os_security_suid_sgid_remove_from_unknown | bool
17 | changed_when: false
18 |
19 | - name: gather files from which to remove suids/sgids and remove system white-listed files | os-06
20 | set_fact:
21 | suid: '{{ sbit_binaries.stdout_lines | difference(os_security_suid_sgid_system_whitelist) }}'
22 | when: os_security_suid_sgid_remove_from_unknown | bool
23 |
24 | - name: remove suid/sgid bit from all binaries except in system and user whitelist | os-06
25 | file:
26 | path: '{{ item }}'
27 | mode: 'a-s'
28 | state: 'file'
29 | follow: 'yes'
30 | with_flattened:
31 | - '{{ suid | default([]) | difference(os_security_suid_sgid_whitelist) }}'
32 | when: os_security_suid_sgid_remove_from_unknown | bool
33 |
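The tasks above combine a system blacklist/whitelist shipped with the role, a user-supplied blacklist/whitelist, and an opt-in sweep over every suid/sgid binary found on the root filesystem. A minimal override sketch; the paths are illustrative assumptions, not role defaults:

    # hypothetical group_vars override (sketch)
    os_security_suid_sgid_remove_from_unknown: true   # also strip bits from binaries located by the find task
    os_security_suid_sgid_whitelist:
      - '/usr/bin/sudo'      # assumed example: keep the setuid bit here
    os_security_suid_sgid_blacklist:
      - '/usr/bin/rcp'       # assumed example: always strip bits here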
--------------------------------------------------------------------------------
/roles/os-harden/tasks/user_accounts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: get UID_MIN from login.defs
3 | shell: awk '/^\s*UID_MIN\s*([0-9]*).*?$/ {print $2}' /etc/login.defs
4 | args:
5 | removes: /etc/login.defs
6 | register: uid_min
7 | check_mode: false
8 | changed_when: false
9 |
10 | - name: calculate UID_MAX from UID_MIN by subtracting 1
11 | set_fact:
12 | uid_max: '{{ uid_min.stdout | int - 1 }}'
13 | when: uid_min.stdout|int > 0
14 |
15 | - name: set UID_MAX on Debian-systems if no login.defs exist
16 | set_fact:
17 | uid_max: '999'
18 | when:
19 | - ansible_facts.os_family == 'Debian'
20 | - uid_max is not defined
21 |
22 | - name: set UID_MAX on other systems if no login.defs exist
23 | set_fact:
24 | uid_max: '499'
25 | when: uid_max is not defined
26 |
27 | - name: get all system accounts
28 | command: awk -F'':'' '{ if ( $3 <= {{ uid_max|quote }} ) print $1}' /etc/passwd
29 | args:
30 | removes: /etc/passwd
31 | changed_when: false
32 | check_mode: false
33 | register: sys_accs
34 |
35 | - name: remove always ignored system accounts from list
36 | set_fact:
37 | sys_accs_cond: '{{ sys_accs.stdout_lines | difference(os_always_ignore_users) }}'
38 | check_mode: false
39 |
40 | - name: change system accounts not on the user provided ignore-list
41 | user:
42 | name: '{{ item }}'
43 | shell: '{{ os_nologin_shell_path }}'
44 | password: '*'
45 | createhome: false
46 | with_flattened:
47 | - '{{ sys_accs_cond | default([]) | difference(os_ignore_users) | list }}'
48 |
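Every account with a UID at or below the computed uid_max gets the nologin shell and a locked password unless it appears in os_always_ignore_users (from the distro vars files below) or in os_ignore_users. A minimal sketch for sparing an extra account; the account name is an assumption:

    # hypothetical group_vars override (sketch)
    os_ignore_users:
      - 'postgres'   # assumed example: keep this system account's shell and password untouched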
--------------------------------------------------------------------------------
/roles/os-harden/templates/etc/audit/auditd.conf.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | log_file = /var/log/audit/audit.log
4 | log_format = RAW
5 | log_group = root
6 | priority_boost = 4
7 | flush = INCREMENTAL
8 | freq = 20
9 | num_logs = 5
10 | disp_qos = lossy
11 | dispatcher = /sbin/audispd
12 | name_format = NONE
13 | ##name = mydomain
14 | max_log_file = 6
15 | max_log_file_action = {{ os_auditd_max_log_file_action }}
16 | space_left = 75
17 | space_left_action = SYSLOG
18 | action_mail_acct = root
19 | admin_space_left = 50
20 | admin_space_left_action = SUSPEND
21 | disk_full_action = SUSPEND
22 | disk_error_action = SUSPEND
23 | ##tcp_listen_port =
24 | tcp_listen_queue = 5
25 | tcp_max_per_addr = 1
26 | ##tcp_client_ports = 1024-65535
27 | tcp_client_max_idle = 0
28 | enable_krb5 = no
29 | krb5_principal = auditd
30 | ##krb5_key_file = /etc/audit/audit.key
31 |
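Only max_log_file_action is templated here; everything else in the file is fixed. A minimal override sketch (keep_logs is a standard auditd value for this setting; the placement is an assumption):

    # hypothetical group_vars override (sketch)
    os_auditd_max_log_file_action: 'keep_logs'   # rotate without deleting old audit logs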
--------------------------------------------------------------------------------
/roles/os-harden/templates/etc/modprobe.d/modprobe.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | {% for fs in os_unused_filesystems | difference(os_filesystem_whitelist) %}
4 | install {{fs}} /bin/true
5 | {% endfor %}
6 |
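The loop emits one "install <fs> /bin/true" stub for every filesystem in os_unused_filesystems that is not in os_filesystem_whitelist, the same blacklisting trick used by /roles/prepare/files/sctp.conf further down. Assuming the unused list contains cramfs and udf and the whitelist is empty, the rendered /etc/modprobe.d/dev-sec.conf would be:

    # rendered output (illustrative variable values)
    install cramfs /bin/true
    install udf /bin/true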
--------------------------------------------------------------------------------
/roles/os-harden/templates/etc/profile.d/profile.conf.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | # Disable core dumps via soft limits for all users. Compliance with this setting is voluntary and can be modified by users up to a hard limit. This setting is a sane default.
4 | ulimit -S -c 0 > /dev/null 2>&1
5 |
--------------------------------------------------------------------------------
/roles/os-harden/templates/etc/securetty.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | # A list of TTYs from which root can log in
4 | # see `man securetty` for reference
5 | {{ "\n".join(os_auth_root_ttys) }}
6 |
--------------------------------------------------------------------------------
/roles/os-harden/templates/etc/sysconfig/rhel_sysconfig_init.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | # color => new RH6.0 bootup
4 | # verbose => old-style bootup
5 | # anything else => new style bootup without ANSI colors or positioning
6 | BOOTUP=color
7 | # column to start "[ OK ]" label in
8 | RES_COL=60
9 | # terminal sequence to move to that column. You could change this
10 | # to something like "tput hpa ${RES_COL}" if your terminal supports it
11 | MOVE_TO_COL="echo -en \\033[${RES_COL}G"
12 | # terminal sequence to set color to a 'success' color (currently: green)
13 | SETCOLOR_SUCCESS="echo -en \\033[0;32m"
14 | # terminal sequence to set color to a 'failure' color (currently: red)
15 | SETCOLOR_FAILURE="echo -en \\033[0;31m"
16 | # terminal sequence to set color to a 'warning' color (currently: yellow)
17 | SETCOLOR_WARNING="echo -en \\033[0;33m"
18 | # terminal sequence to reset to the default color.
19 | SETCOLOR_NORMAL="echo -en \\033[0;39m"
20 | # Set to anything other than 'no' to allow hotkey interactive startup...
21 | PROMPT={{ 'yes' if (os_security_init_prompt|bool) else 'no' }}
22 | # Set to 'yes' to allow probing for devices with swap signatures
23 | AUTOSWAP=no
24 | # What ttys should gettys be started on?
25 | ACTIVE_CONSOLES=/dev/tty[1-6]
26 | # Set to '/sbin/sulogin' to prompt for password on single-user mode
27 | # Set to '/sbin/sushell' otherwise
28 | SINGLE={{ '/sbin/sulogin' if os_security_init_single else '/sbin/sushell' }}
29 |
30 | # NSA 2.2.4.1 Set Daemon umask
31 | umask 027
32 |
--------------------------------------------------------------------------------
/roles/os-harden/templates/usr/share/pam-configs/pam_passwdqd.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | Name: passwdqc password strength enforcement
4 | Default: yes
5 | Priority: 1024
6 | Conflicts: cracklib
7 | Password-Type: Primary
8 | Password:
9 | requisite pam_passwdqc.so {{ os_auth_pam_passwdqc_options }}
10 |
--------------------------------------------------------------------------------
/roles/os-harden/templates/usr/share/pam-configs/pam_tally2.j2:
--------------------------------------------------------------------------------
1 | {{ ansible_managed | comment }}
2 |
3 | Name: tally2 lockout after failed attempts enforcement
4 | Default: yes
5 | Priority: 1024
6 | Conflicts: cracklib
7 | Auth-Type: Primary
8 | Auth-Initial:
9 | required pam_tally2.so deny={{ os_auth_retries }} onerr=fail unlock_time={{ os_auth_lockout_time }}
10 | Account-Type: Primary
11 | Account-Initial:
12 | required pam_tally2.so
13 |
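With illustrative values os_auth_retries=5 and os_auth_lockout_time=600 (the actual defaults live in the role defaults, which are not part of this section), the Auth-Initial stanza renders as:

    required pam_tally2.so deny=5 onerr=fail unlock_time=600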
--------------------------------------------------------------------------------
/roles/os-harden/vars/Amazon.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # system accounts that do not get their login disabled and password changed
3 | os_always_ignore_users: ['root', 'sync', 'shutdown', 'halt', 'ec2-user']
4 |
5 | sysctl_rhel_config:
6 | # ExecShield protection against buffer overflows
7 | kernel.exec-shield: 1
8 | # Syncookies is used to prevent SYN-flooding attacks.
9 | net.ipv4.tcp_syncookies: 1
10 |
--------------------------------------------------------------------------------
/roles/os-harden/vars/Archlinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | os_nologin_shell_path: '/sbin/nologin'
4 |
5 | os_shadow_perms:
6 | owner: root
7 | group: root
8 | mode: '0600'
9 |
10 | os_passwd_perms:
11 | owner: root
12 | group: root
13 | mode: '0644'
14 |
15 | os_env_umask: '027'
16 |
17 | os_auth_uid_min: 1000
18 | os_auth_gid_min: 1000
19 | os_auth_sys_uid_min: 500
20 | os_auth_sys_uid_max: 999
21 | os_auth_sys_gid_min: 500
22 | os_auth_sys_gid_max: 999
23 |
24 | modprobe_package: 'kmod'
25 | auditd_package: 'audit'
26 |
--------------------------------------------------------------------------------
/roles/os-harden/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | os_packages_pam_ccreds: 'libpam-ccreds'
4 | os_packages_pam_passwdqc: 'libpam-passwdqc'
5 | os_packages_pam_cracklib: 'libpam-cracklib'
6 | os_nologin_shell_path: '/usr/sbin/nologin'
7 |
8 | # Different distros use different standards for /etc/shadow perms, e.g.
9 | # RHEL derivatives use root:root 0000, whereas Debian-based use root:shadow 0640.
10 | # You must provide key/value pairs for owner, group, and mode if overriding.
11 | os_shadow_perms:
12 | owner: root
13 | group: shadow
14 | mode: '0640'
15 |
16 | os_passwd_perms:
17 | owner: root
18 | group: root
19 | mode: '0644'
20 |
21 | os_env_umask: '027'
22 |
23 | os_auth_uid_min: 1000
24 | os_auth_gid_min: 1000
25 | os_auth_sys_uid_min: 100
26 | os_auth_sys_uid_max: 999
27 | os_auth_sys_gid_min: 100
28 | os_auth_sys_gid_max: 999
29 |
30 | # defaults for useradd
31 | os_useradd_mail_dir: /var/mail
32 |
33 | modprobe_package: 'kmod'
34 | auditd_package: 'auditd'
35 |
36 | tally2_path: '/usr/share/pam-configs/tally2'
37 | passwdqc_path: '/usr/share/pam-configs/passwdqc'
38 |
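As the comment above notes, an override of os_shadow_perms has to supply all three keys. A minimal sketch of a stricter Debian-family override; the mode value is an assumption, not the role default:

    # hypothetical group_vars override (sketch)
    os_shadow_perms:
      owner: root
      group: shadow
      mode: '0600'   # tighter than the 0640 shown above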
--------------------------------------------------------------------------------
/roles/os-harden/vars/Fedora.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | os_packages_pam_ccreds: 'pam_ccreds'
4 | os_packages_pam_passwdqc: 'pam_passwdqc'
5 | os_packages_pam_cracklib: 'pam_cracklib'
6 | os_nologin_shell_path: '/sbin/nologin'
7 |
8 | # Different distros use different standards for /etc/shadow perms, e.g.
9 | # RHEL derivatives use root:root 0000, whereas Debian-based use root:shadow 0640.
10 | # You must provide key/value pairs for owner, group, and mode if overriding.
11 | os_shadow_perms:
12 | owner: root
13 | group: root
14 | mode: '0000'
15 |
16 | os_passwd_perms:
17 | owner: root
18 | group: root
19 | mode: '0644'
20 |
21 | os_env_umask: '027'
22 |
23 | os_auth_uid_min: 1000
24 | os_auth_gid_min: 1000
25 | os_auth_sys_uid_min: 201
26 | os_auth_sys_uid_max: 999
27 | os_auth_sys_gid_min: 201
28 | os_auth_sys_gid_max: 999
29 |
30 | modprobe_package: 'module-init-tools'
31 | auditd_package: 'audit'
32 |
--------------------------------------------------------------------------------
/roles/os-harden/vars/Oracle Linux.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | os_packages_pam_ccreds: 'pam_ccreds'
4 | os_packages_pam_passwdqc: 'pam_passwdqc'
5 | os_packages_pam_cracklib: 'pam_cracklib'
6 | os_nologin_shell_path: '/sbin/nologin'
7 |
8 | # Different distros use different standards for /etc/shadow perms, e.g.
9 | # RHEL derivatives use root:root 0000, whereas Debian-based use root:shadow 0640.
10 | # You must provide key/value pairs for owner, group, and mode if overriding.
11 | os_shadow_perms:
12 | owner: root
13 | group: root
14 | mode: '0000'
15 |
16 | os_passwd_perms:
17 | owner: root
18 | group: root
19 | mode: '0644'
20 |
21 | os_env_umask: '077'
22 |
23 | os_auth_uid_min: 1000
24 | os_auth_gid_min: 1000
25 | os_auth_sys_uid_min: 201
26 | os_auth_sys_uid_max: 999
27 | os_auth_sys_gid_min: 201
28 | os_auth_sys_gid_max: 999
29 |
--------------------------------------------------------------------------------
/roles/os-harden/vars/RedHat-6.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | sysctl_rhel_config:
4 | # ExecShield protection against buffer overflows
5 | kernel.exec-shield: 1
6 | # Syncookies is used to prevent SYN-flooding attacks.
7 | net.ipv4.tcp_syncookies: 1
8 |
--------------------------------------------------------------------------------
/roles/os-harden/vars/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | os_packages_pam_ccreds: 'pam_ccreds'
4 | os_packages_pam_passwdqc: 'pam_passwdqc'
5 | os_packages_pam_cracklib: 'pam_cracklib'
6 | os_nologin_shell_path: '/sbin/nologin'
7 |
8 | # Different distros use different standards for /etc/shadow perms, e.g.
9 | # RHEL derivatives use root:root 0000, whereas Debian-based use root:shadow 0640.
10 | # You must provide key/value pairs for owner, group, and mode if overriding.
11 | os_shadow_perms:
12 | owner: root
13 | group: root
14 | mode: '0000'
15 |
16 | os_passwd_perms:
17 | owner: root
18 | group: root
19 | mode: '0644'
20 |
21 | os_env_umask: '077'
22 |
23 | os_auth_uid_min: 1000
24 | os_auth_gid_min: 1000
25 | os_auth_sys_uid_min: 201
26 | os_auth_sys_uid_max: 999
27 | os_auth_sys_gid_min: 201
28 | os_auth_sys_gid_max: 999
29 |
30 | # defaults for useradd
31 | os_useradd_mail_dir: /var/spool/mail
32 | os_useradd_create_home: true
33 |
34 | modprobe_package: 'module-init-tools'
35 | auditd_package: 'audit'
36 |
--------------------------------------------------------------------------------
/roles/os-harden/vars/Suse.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | os_packages_pam_ccreds: 'pam_ccreds'
4 | os_packages_pam_passwdqc: 'pam_passwdqc'
5 | os_packages_pam_cracklib: 'cracklib'
6 | os_nologin_shell_path: '/sbin/nologin'
7 |
8 | # Different distros use different standards for /etc/shadow perms, e.g.
9 | # RHEL derivatives use root:root 0000, whereas Debian-based use root:shadow 0640.
10 | # You must provide key/value pairs for owner, group, and mode if overriding.
11 | os_shadow_perms:
12 | owner: root
13 | group: shadow
14 | mode: '0640'
15 |
16 | os_passwd_perms:
17 | owner: root
18 | group: root
19 | mode: '0644'
20 |
21 | os_env_umask: '027'
22 |
23 | os_auth_uid_min: 1000
24 | os_auth_gid_min: 1000
25 | os_auth_sys_uid_min: 100
26 | os_auth_sys_uid_max: 499
27 | os_auth_sys_gid_min: 100
28 | os_auth_sys_gid_max: 499
29 |
30 | # defaults for useradd
31 | os_useradd_create_home: false
32 |
33 | modprobe_package: 'kmod-compat'
34 | auditd_package: 'audit'
35 |
--------------------------------------------------------------------------------
/roles/prepare/files/sctp.conf:
--------------------------------------------------------------------------------
1 | # put sctp into blacklist
2 | install sctp /bin/true
3 |
--------------------------------------------------------------------------------
/roles/prepare/tasks/offline.yml:
--------------------------------------------------------------------------------
1 | # Install basic system packages offline
2 | # Only used when a node cannot install from a local yum/apt repository
3 | # Download the offline packages first with something like ./ezdown -P debian_10
4 |
5 | - name: check that offline package {{ SYS_PKG_NAME }} has been downloaded
6 | stat:
7 | path: "{{ base_dir }}/down/packages/{{ SYS_PKG_NAME }}"
8 | register: pkg
9 | connection: local
10 | run_once: true
11 |
12 | - name: prepare the offline package directory
13 | file: name=/opt/kube/packages/basic state=directory
14 |
15 | - block:
16 | - name: distribute offline package {{ SYS_PKG_NAME }}
17 | copy:
18 | src: "{{ base_dir }}/down/packages/{{ SYS_PKG_NAME }}"
19 | dest: "/opt/kube/packages/basic/{{ SYS_PKG_NAME }}"
20 |
21 | # Offline packages may need several install attempts before they succeed
22 | - name: install offline package {{ SYS_PKG_NAME }}
23 | shell: 'cd /opt/kube/packages/basic && tar zxf {{ SYS_PKG_NAME }} &&
24 | dpkg -i *.deb > /tmp/install_basic.log 2>&1'
25 | register: install_info
26 | until: not install_info.failed
27 | retries: 3
28 | delay: 1
29 | when: 'ansible_distribution_file_variety in ["Debian"]'
30 |
31 | - name: install offline package {{ SYS_PKG_NAME }}
32 | shell: 'cd /opt/kube/packages/basic && tar zxf {{ SYS_PKG_NAME }} &&
33 | rpm -Uvh --force --nodeps *.rpm > /tmp/install_basic.log 2>&1'
34 | when: 'ansible_distribution_file_variety in ["RedHat", "SUSE"]'
35 | when: 'pkg.stat.exists|bool'
36 | ignore_errors: true
37 |
--------------------------------------------------------------------------------
/roles/prepare/tasks/suse.yml:
--------------------------------------------------------------------------------
1 | - name: install basic packages
2 | package:
3 | name:
4 | - bash-completion # bash command completion; takes effect after re-login
5 | - conntrack-tools # required for ipvs mode
6 | - ipset # required for ipvs mode
7 | - ipvsadm # required for ipvs mode
8 | - libseccomp2 # required to install containerd
9 | - nfs-client # needed to mount nfs shares (required for nfs-based PVs)
10 | - psmisc # provides the killall command, needed by the keepalived check script
11 | - rsync # file sync tool, used to distribute certificates and other config files
12 | - socat # used for port forwarding
13 | state: present
14 | when: 'INSTALL_SOURCE != "offline"'
15 |
16 | # install basic packages offline
17 | - import_tasks: offline.yml
18 | when: 'INSTALL_SOURCE == "offline"'
19 |
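Whether the offline branch runs is driven entirely by INSTALL_SOURCE. A minimal sketch, assuming the variable is set in the cluster's config.yml like the rest of kubeasz's configuration:

    # sketch: switch the prepare role to the offline install path
    INSTALL_SOURCE: "offline"   # any other value keeps the normal package-manager install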
--------------------------------------------------------------------------------
/roles/prepare/templates/10-k8s-modules.conf.j2:
--------------------------------------------------------------------------------
1 | br_netfilter
2 | ip_vs
3 | ip_vs_rr
4 | ip_vs_wrr
5 | ip_vs_sh
6 | nf_conntrack
7 | {% if 'NoFound' not in NF_CONNTRACK_IPV4.stdout %}
8 | nf_conntrack_ipv4
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/roles/prepare/templates/30-k8s-ulimits.conf.j2:
--------------------------------------------------------------------------------
1 | [Manager]
2 | DefaultLimitCORE=infinity
3 | DefaultLimitNOFILE=100000
4 | DefaultLimitNPROC=100000
5 |
--------------------------------------------------------------------------------
/roles/prepare/templates/95-k8s-journald.conf.j2:
--------------------------------------------------------------------------------
1 | [Journal]
2 | # persist the journal to disk
3 | Storage=persistent
4 |
5 | # cap total disk usage at 2G
6 | SystemMaxUse=2G
7 |
8 | # cap each journal file at 200M
9 | SystemMaxFileSize=200M
10 |
11 | # keep logs for 2 weeks
12 | MaxRetentionSec=2week
13 |
14 | # disable forwarding
15 | ForwardToSyslog=no
16 | ForwardToWall=no
17 |
--------------------------------------------------------------------------------
/roles/prepare/templates/95-k8s-sysctl.conf.j2:
--------------------------------------------------------------------------------
1 | net.ipv4.ip_forward = 1
2 | net.bridge.bridge-nf-call-iptables = 1
3 | net.bridge.bridge-nf-call-ip6tables = 1
4 | net.bridge.bridge-nf-call-arptables = 1
5 | net.ipv4.tcp_tw_reuse = 0
6 | net.core.somaxconn = 32768
7 | net.netfilter.nf_conntrack_max=1000000
8 | vm.swappiness = 0
9 | vm.max_map_count=655360
10 | fs.file-max=6553600
11 | {% if PROXY_MODE == "ipvs" %}
12 | net.ipv4.tcp_keepalive_time = 600
13 | net.ipv4.tcp_keepalive_intvl = 30
14 | net.ipv4.tcp_keepalive_probes = 10
15 | {% endif %}
16 |
--------------------------------------------------------------------------------
/roles/prepare/vars/main.yml:
--------------------------------------------------------------------------------
1 | # offline package file name
2 |
3 | SYS_PKG_NAME: "{{ ansible_distribution|lower|replace(' ', '_') }}_{{ ansible_distribution_major_version }}.tgz"
4 |
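The package name is derived from distribution facts, so it lines up with the archives produced by ./ezdown -P. Two illustrative renderings (host facts assumed):

    # Debian 10 host:    SYS_PKG_NAME -> "debian_10.tgz"   (matches the ./ezdown -P debian_10 hint in offline.yml)
    # Ubuntu 22.04 host: SYS_PKG_NAME -> "ubuntu_22.tgz"

The replace(' ', '_') filter guards against distribution facts that contain a space.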
--------------------------------------------------------------------------------