├── .gitignore ├── 01-base.yml ├── 02-runtime.yml ├── 03-kubernetes-component.yml ├── 04-load-balancer.yml ├── 05-certificates.yml ├── 06-etcd.yml ├── 07-kubernetes-master.yml ├── 08-kubernetes-worker.yml ├── 09-plugin-network.yml ├── 10-plugin-cluster-storage-add-worker.yml ├── 10-plugin-cluster-storage-cinder.yml ├── 10-plugin-cluster-storage-external-ceph-block.yml ├── 10-plugin-cluster-storage-external-cephfs.yml ├── 10-plugin-cluster-storage-glusterfs.yml ├── 10-plugin-cluster-storage-nfs.yml ├── 10-plugin-cluster-storage-oceanstor.yml ├── 10-plugin-cluster-storage-rook-ceph.yml ├── 10-plugin-cluster-storage-vsphere.yml ├── 11-helm-install.yml ├── 12-npd.yml ├── 13-metrics-server.yml ├── 14-ingress-controller.yml ├── 15-post.yml ├── 16-gpu-operator.yml ├── 17-dns-cache.yml ├── 18-istio.yml ├── 19-metallb.yml ├── 90-init-cluster.yml ├── 91-add-worker-01-base.yml ├── 91-add-worker-02-runtime.yml ├── 91-add-worker-03-kubernetes-component.yml ├── 91-add-worker-04-load-balancer.yml ├── 91-add-worker-05-certificates.yml ├── 91-add-worker-06-kubernetes-worker.yml ├── 91-add-worker-07-network.yml ├── 91-add-worker-08-post.yml ├── 91-add-worker.yml ├── 92-upgrade-cluster.yml ├── 93-certificates-renew.yml ├── 94-backup-cluster.yml ├── 95-restore-cluster.yml ├── 96-remove-worker.yml ├── 97-reset-worker.yml ├── 99-reset-cluster.yml ├── LICENSE ├── README.md ├── hosts.hostname.ini ├── hosts.ip.ini ├── roles ├── backup │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── chrony │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── client.conf.j2 │ │ └── server.conf.j2 ├── etcd │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── etcd.service.j2 ├── helm │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── helm-rbac.yaml.j2 ├── kube-config │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── kubeconfig.yml │ │ └── main.yml ├── kube-master │ ├── defaults │ │ └── main.yaml │ ├── tasks │ │ ├── kubeadm-config.yml │ │ ├── main.yml │ │ ├── master-init.yml │ │ └── master-join.yml │ └── templates │ │ ├── apiserver-audit-policy.yaml.j2 │ │ ├── kubeadm-controlplane-init.v1beta1.yaml.j2 │ │ ├── kubeadm-controlplane-init.v1beta2.yaml.j2 │ │ ├── kubeadm-controlplane-join.v1beta1.yaml.j2 │ │ ├── kubeadm-controlplane-join.v1beta2.yaml.j2 │ │ ├── kubelet-certificates-renewal.yaml.j2 │ │ ├── kubelet-config.v1beta1.yaml.j2 │ │ ├── pod-security-policy.yaml.j2 │ │ └── secrets-encryption.yaml.j2 ├── kube-worker │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── kubeadm-join.v1beta1.yaml.j2 │ │ ├── kubeadm-join.v1beta2.yaml.j2 │ │ └── kubelet-config.v1beta1.yaml.j2 ├── load-balancer │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ ├── external.yml │ │ ├── internal.yml │ │ └── main.yml │ └── templates │ │ ├── haproxy │ │ └── haproxy.cfg.j2 │ │ ├── keepalived-backup.conf.j2 │ │ └── keepalived-master.conf.j2 ├── plugins │ ├── cluster-storage │ │ ├── cinder │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── disable.yml │ │ │ │ ├── enable.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── cinder-csi-cloud-config-secret.yml.j2 │ │ │ │ ├── cinder-csi-cloud-config.j2 │ │ │ │ ├── cinder-csi-controllerplugin-rbac.yml.j2 │ │ │ │ ├── cinder-csi-controllerplugin.yml.j2 │ │ │ │ ├── cinder-csi-driver.yml.j2 │ │ │ │ ├── cinder-csi-nodeplugin-rbac.yml.j2 │ │ │ │ ├── cinder-csi-nodeplugin.yml.j2 │ │ │ │ ├── cinder-csi-poddisruptionbudget.yml.j2 │ │ │ │ └── storageclass.yaml.j2 │ 
│ ├── external-ceph-block │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── disable.yml │ │ │ │ ├── enable.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── ceph-rbd-provisioner.yaml.j2 │ │ │ │ └── storageclass.yaml.j2 │ │ ├── external-cephfs │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── disable.yml │ │ │ │ ├── enable.yml │ │ │ │ └── main.yaml │ │ │ └── templates │ │ │ │ ├── ceph-fs-provisioner.yaml.j2 │ │ │ │ └── storageclass.yaml.j2 │ │ ├── glusterfs │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── storageclass.yaml.j2 │ │ ├── nfs │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── disable.yml │ │ │ │ ├── enable.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── nfs-client-provisioner.yaml.j2 │ │ ├── oceanstor │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── disable.yml │ │ │ │ ├── enable.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── huawei-csi-configmap.yaml.j2 │ │ │ │ ├── huawei-csi-controller.yaml.j2 │ │ │ │ ├── huawei-csi-multi-controller.yaml.j2 │ │ │ │ ├── huawei-csi-node.yaml.j2 │ │ │ │ ├── huawei-csi-rbac-for-multi-controller.yaml.j2 │ │ │ │ └── huawei-csi-rbac.yaml.j2 │ │ ├── rook-ceph │ │ │ ├── defaults │ │ │ │ └── main.yaml │ │ │ ├── tasks │ │ │ │ ├── disable.yml │ │ │ │ ├── enable.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── cluster.yaml.j2 │ │ │ │ ├── common.yaml.j2 │ │ │ │ ├── crds.yaml.j2 │ │ │ │ ├── filesystem.yaml.j2 │ │ │ │ ├── operator.yaml.j2 │ │ │ │ ├── replicapool.yaml.j2 │ │ │ │ └── storageclass.yaml.j2 │ │ └── vsphere │ │ │ ├── defaults │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ ├── disable.yml │ │ │ ├── enable.yml │ │ │ └── main.yml │ │ │ └── templates │ │ │ ├── setup.sh.j2 │ │ │ ├── vsphere-csi-cloud-config.j2 │ │ │ ├── vsphere-csi-controller-config.yml.j2 │ │ │ ├── vsphere-csi-controller-deployment.yml.j2 │ │ │ ├── vsphere-csi-controller-rbac.yml.j2 │ │ │ ├── vsphere-csi-controller-service.yml.j2 │ │ │ ├── vsphere-csi-driver.yml.j2 │ │ │ ├── vsphere-csi-node-rbac.yml.j2 │ │ │ └── vsphere-csi-node.yml.j2 │ ├── dns-cache │ │ ├── tasks │ │ │ ├── disable.yml │ │ │ ├── enable.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── iptables.yaml.j2 │ │ │ └── ipvs.yaml.j2 │ ├── gpu │ │ └── gpu-operator │ │ │ ├── files │ │ │ └── gpu-operator-v1.7.0.tgz │ │ │ └── tasks │ │ │ ├── disable.yml │ │ │ ├── enable.yml │ │ │ └── main.yml │ ├── ingress-controller │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── disable-nginx.yml │ │ │ ├── disable-traefik.yml │ │ │ ├── enable-nginx.yml │ │ │ ├── enable-traefik.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── nginx-ingress-controller-v1.1.1.yaml.j2 │ │ │ ├── nginx-ingress-controller.yaml.j2 │ │ │ └── traefik-ingress-controller │ │ │ ├── crds.yaml.j2 │ │ │ └── traefik-ingress-controller.yaml.j2 │ ├── istio │ │ ├── files │ │ │ ├── istio-base-1.1.0.tgz │ │ │ ├── istio-discovery-1.2.0.tgz │ │ │ ├── istio-egress-1.1.0.tgz │ │ │ └── istio-ingress-1.1.0.tgz │ │ └── tasks │ │ │ ├── disable.yml │ │ │ ├── enable.yml │ │ │ └── main.yml │ ├── metallb │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── metallb-0.13.7.tgz │ │ ├── tasks │ │ │ ├── disable.yml │ │ │ ├── enable.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── metallb-config-IPAddressPool.yaml.j2 │ │ │ └── metallb-config-L2Advertisement.yaml.j2 │ ├── metrics-server │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── disable.yml │ │ │ ├── enable.yml │ │ │ └── main.yml │ │ └── templates │ │ │ └── metrics-server.yaml.j2 │ ├── 
network-plugins │ │ ├── network-deploy │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── calico.yml │ │ │ │ ├── cilium.yml │ │ │ │ ├── flannel.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ ├── calico │ │ │ │ ├── calico-typha-v3.16.yaml.j2 │ │ │ │ ├── calico-typha-v3.18.yaml.j2 │ │ │ │ ├── calico-typha-v3.21.yaml.j2 │ │ │ │ ├── calico-typha.yaml.j2 │ │ │ │ ├── crds-v3.16.yaml.j2 │ │ │ │ ├── crds-v3.18.yaml.j2 │ │ │ │ ├── crds-v3.21.yaml.j2 │ │ │ │ └── crds.yaml.j2 │ │ │ │ ├── cilium │ │ │ │ └── cilium.yaml.j2 │ │ │ │ └── flannel │ │ │ │ └── kube-flannel.yaml.j2 │ │ └── network-prepare │ │ │ ├── defaults │ │ │ └── main.yml │ │ │ └── tasks │ │ │ ├── cilium.yml │ │ │ └── main.yml │ └── npd │ │ ├── defaults │ │ └── main.yml │ │ ├── tasks │ │ ├── disable.yml │ │ ├── enable.yml │ │ └── main.yml │ │ └── templates │ │ ├── npd-config.yaml.j2 │ │ └── npd-ds.yaml.j2 ├── post │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── ko-admin.yaml.j2 ├── prepare │ ├── base │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── centos.yml │ │ │ ├── common.yml │ │ │ ├── debian.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── 10-k8s-modules.conf.j2 │ │ │ ├── 30-k8s-ulimits.conf.j2 │ │ │ ├── 99-sysctl-ko.conf.j2 │ │ │ └── sunrpc.conf.j2 │ ├── containerd │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── config.toml.j2 │ │ │ ├── containerd.service.j2 │ │ │ └── crictl.yaml.j2 │ ├── docker │ │ ├── defaults │ │ │ └── main.yaml │ │ ├── files │ │ │ ├── docker │ │ │ └── docker-tag │ │ ├── tasks │ │ │ └── main.yaml │ │ └── templates │ │ │ ├── daemon.json.j2 │ │ │ └── docker.service.j2 │ ├── etcd-certificates │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── certs_stat.yml │ │ │ ├── distribute.yml │ │ │ ├── generate.yml │ │ │ └── main.yml │ │ └── templates │ │ │ └── etcd-openssl.cnf.j2 │ ├── kube-certificates │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── certs_stat.yml │ │ │ ├── common.yml │ │ │ ├── distribute.yml │ │ │ └── main.yml │ │ └── templates │ │ │ └── kube-openssl.cnf.j2 │ ├── kubernetes │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── kubernetes.gpg │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── 10-kubeadm.conf.j2 │ │ │ ├── kubelet.config.j2 │ │ │ └── kubelet.service.j2 │ ├── nameserver │ │ └── tasks │ │ │ └── main.yml │ └── repository │ │ ├── defaults │ │ └── main.yaml │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ ├── kubeops.repo-euler-amd64.j2 │ │ ├── kubeops.repo-euler-arm64.j2 │ │ ├── kubeops.repo-kylin-amd64-deb.j2 │ │ ├── kubeops.repo-kylin-amd64-rpm.j2 │ │ ├── kubeops.repo-kylin-arm64-deb.j2 │ │ ├── kubeops.repo-kylin-arm64-rpm.j2 │ │ ├── kubeops.repo-linux-amd64.j2 │ │ ├── kubeops.repo-linux-arm64.j2 │ │ ├── kubeops.repo-openeuler.j2 │ │ ├── kubeops.repo-ubuntu-amd64.j2 │ │ └── kubeops.repo-ubuntu-arm64.j2 ├── remove │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── reset │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── restore │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml └── upgrade │ ├── defaults │ └── main.yml │ ├── files │ └── kubernetes.gpg │ └── tasks │ ├── centos.yml │ ├── common.yml │ └── main.yml └── variables.yml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | *.iml 3 | target/ 4 | .DS_Store -------------------------------------------------------------------------------- /01-base.yml: 
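All of the staged playbooks below target the same small set of inventory groups: kube-master, kube-worker, new-worker, del-worker, etcd and chrony. The shipped hosts.hostname.ini / hosts.ip.ini inventories define these groups in INI form; the YAML rendering below is only a sketch of the expected layout, with hypothetical host names, and it assumes (as 06-etcd.yml suggests) that etcd and chrony reuse the master nodes — the real inventories may lay this out differently.

```
all:
  children:
    kube-master:
      hosts:
        master-1:          # hypothetical host names
    kube-worker:
      hosts:
        worker-1:
        worker-2:
    new-worker: {}         # populated only when adding workers (91-*.yml)
    del-worker: {}         # populated only when removing workers (96/97-*.yml)
    etcd:
      children:
        kube-master:
    chrony:
      children:
        kube-master:
```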
-------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - prepare/repository 7 | - prepare/base 8 | - prepare/nameserver 9 | - chrony -------------------------------------------------------------------------------- /02-runtime.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - { role: prepare/docker, when: "container_runtime == 'docker'" } 7 | - { role: prepare/containerd, when: "container_runtime == 'containerd'" } -------------------------------------------------------------------------------- /03-kubernetes-component.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - prepare/kubernetes -------------------------------------------------------------------------------- /04-load-balancer.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - load-balancer -------------------------------------------------------------------------------- /05-certificates.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - prepare/etcd-certificates 7 | - prepare/kube-certificates -------------------------------------------------------------------------------- /06-etcd.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master 2 | roles: 3 | - etcd -------------------------------------------------------------------------------- /07-kubernetes-master.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master 2 | roles: 3 | - kube-master -------------------------------------------------------------------------------- /08-kubernetes-worker.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-worker 3 | - new-worker 4 | roles: 5 | - kube-worker -------------------------------------------------------------------------------- /09-plugin-network.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - plugins/network-plugins/network-prepare 7 | 8 | - hosts: kube-master[0] 9 | gather_facts: false 10 | roles: 11 | - plugins/network-plugins/network-deploy 12 | -------------------------------------------------------------------------------- /10-plugin-cluster-storage-add-worker.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - { role: plugins/cluster-storage/nfs, when: "enable_nfs_provisioner == 'enable'" } 4 | - { role: plugins/cluster-storage/glusterfs, when: "enable_gfs_provisioner == 'enable'" } 5 | - { role: plugins/cluster-storage/external-ceph-block, when: "enable_external_ceph_block_provisioner == 'enable'" } 6 | - { role: plugins/cluster-storage/external-cephfs, when: "enable_external_cephfs_provisioner == 'enable'" } -------------------------------------------------------------------------------- /10-plugin-cluster-storage-cinder.yml: 
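The runtime and storage playbooks above switch their roles on plain string flags supplied with `-e @variables.yml`. The flag names below are taken verbatim from the when: conditions in this listing; the sample values are illustrative (anything other than 'enable' simply skips the role), and the shipped variables.yml defines many more settings than shown here.

```
container_runtime: containerd                     # or 'docker' (02-runtime.yml)
kube_proxy_mode: ipvs                             # checked by the add-worker dns-cache patch
component_created_by: cluster                     # gates the dns-cache / gpu-operator plays
enable_dns_cache: enable
enable_gpu: disable
enable_nfs_provisioner: disable                   # storage flags from 10-plugin-cluster-storage-add-worker.yml
enable_gfs_provisioner: disable
enable_external_ceph_block_provisioner: disable
enable_external_cephfs_provisioner: disable
```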
-------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | roles: 3 | - plugins/cluster-storage/cinder -------------------------------------------------------------------------------- /10-plugin-cluster-storage-external-ceph-block.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - plugins/cluster-storage/external-ceph-block -------------------------------------------------------------------------------- /10-plugin-cluster-storage-external-cephfs.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | roles: 3 | - plugins/cluster-storage/external-cephfs -------------------------------------------------------------------------------- /10-plugin-cluster-storage-glusterfs.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - plugins/cluster-storage/glusterfs -------------------------------------------------------------------------------- /10-plugin-cluster-storage-nfs.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - plugins/cluster-storage/nfs -------------------------------------------------------------------------------- /10-plugin-cluster-storage-oceanstor.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - plugins/cluster-storage/oceanstor -------------------------------------------------------------------------------- /10-plugin-cluster-storage-rook-ceph.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | roles: 3 | - plugins/cluster-storage/rook-ceph -------------------------------------------------------------------------------- /10-plugin-cluster-storage-vsphere.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | roles: 3 | - plugins/cluster-storage/vsphere -------------------------------------------------------------------------------- /11-helm-install.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master 2 | gather_facts: false 3 | roles: 4 | - helm -------------------------------------------------------------------------------- /12-npd.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - plugins/npd -------------------------------------------------------------------------------- /13-metrics-server.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - plugins/metrics-server -------------------------------------------------------------------------------- /14-ingress-controller.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - plugins/ingress-controller -------------------------------------------------------------------------------- /15-post.yml: 
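Playbooks such as 12-npd.yml, 13-metrics-server.yml and 14-ingress-controller.yml each run a single role under roles/plugins on the first master only (kube-master[0]); 11-helm-install.yml runs the helm role on every master. Per the directory tree, most of these plugin roles ship tasks/enable.yml, tasks/disable.yml and tasks/main.yml (ingress-controller splits them per controller). A minimal sketch of the dispatch a main.yml of that shape typically performs is shown below; the switch variable name is hypothetical, not copied from the repository.

```
# Hypothetical dispatch in roles/plugins/<plugin>/tasks/main.yml
- include_tasks: enable.yml
  when: plugin_operation == 'enable'

- include_tasks: disable.yml
  when: plugin_operation == 'disable'
```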
-------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | gather_facts: false 6 | roles: 7 | - post 8 | 9 | - hosts: kube-master[0] 10 | gather_facts: false 11 | roles: 12 | - { role: plugins/dns-cache, when: "enable_dns_cache == 'enable' and component_created_by == 'cluster'" } 13 | 14 | - hosts: kube-master[0] 15 | gather_facts: false 16 | roles: 17 | - { role: plugins/gpu/gpu-operator, when: "container_runtime == 'docker' and enable_gpu == 'enable' and component_created_by == 'cluster'" } 18 | -------------------------------------------------------------------------------- /16-gpu-operator.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - { role: plugins/gpu/gpu-operator } -------------------------------------------------------------------------------- /17-dns-cache.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - { role: plugins/dns-cache } -------------------------------------------------------------------------------- /18-istio.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - { role: plugins/istio } -------------------------------------------------------------------------------- /19-metallb.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-master[0] 2 | gather_facts: false 3 | roles: 4 | - plugins/metallb -------------------------------------------------------------------------------- /90-init-cluster.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - prepare/repository 7 | - prepare/base 8 | - prepare/nameserver 9 | - chrony 10 | 11 | - hosts: 12 | - kube-master 13 | - kube-worker 14 | - new-worker 15 | roles: 16 | - { role: prepare/docker, when: "container_runtime == 'docker'" } 17 | - { role: prepare/containerd, when: "container_runtime == 'containerd'" } 18 | 19 | - hosts: 20 | - kube-master 21 | - kube-worker 22 | - new-worker 23 | roles: 24 | - prepare/kubernetes 25 | 26 | - hosts: 27 | - kube-master 28 | - kube-worker 29 | - new-worker 30 | roles: 31 | - load-balancer 32 | 33 | - hosts: 34 | - kube-master 35 | - kube-worker 36 | - new-worker 37 | roles: 38 | - prepare/etcd-certificates 39 | - prepare/kube-certificates 40 | 41 | - hosts: kube-master 42 | gather_facts: false 43 | roles: 44 | - etcd 45 | 46 | - hosts: kube-master 47 | roles: 48 | - kube-master 49 | 50 | - hosts: 51 | - kube-worker 52 | - new-worker 53 | roles: 54 | - kube-worker 55 | 56 | - hosts: 57 | - kube-master 58 | - kube-worker 59 | - new-worker 60 | roles: 61 | - plugins/network-plugins/network-prepare 62 | 63 | - hosts: kube-master[0] 64 | gather_facts: false 65 | roles: 66 | - plugins/network-plugins/network-deploy 67 | 68 | - hosts: kube-master 69 | gather_facts: false 70 | roles: 71 | - helm 72 | 73 | - hosts: kube-master[0] 74 | gather_facts: false 75 | roles: 76 | - plugins/metrics-server 77 | 78 | - hosts: kube-master[0] 79 | gather_facts: false 80 | roles: 81 | - plugins/ingress-controller 82 | 83 | - hosts: 84 | - kube-master 85 | - kube-worker 86 | - new-worker 87 | gather_facts: false 88 | roles: 89 | - post 90 | 91 | - 
hosts: kube-master[0] 92 | gather_facts: false 93 | roles: 94 | - { role: plugins/dns-cache, when: "enable_dns_cache == 'enable' and component_created_by == 'cluster'" } 95 | 96 | - hosts: kube-master[0] 97 | gather_facts: false 98 | roles: 99 | - { role: plugins/gpu/gpu-operator, when: "container_runtime == 'docker' and enable_gpu == 'enable' and component_created_by == 'cluster'" } 100 | -------------------------------------------------------------------------------- /91-add-worker-01-base.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - prepare/repository 4 | - prepare/base 5 | - chrony 6 | 7 | - hosts: 8 | - kube-master 9 | - kube-worker 10 | - new-worker 11 | roles: 12 | - prepare/nameserver -------------------------------------------------------------------------------- /91-add-worker-02-runtime.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - { role: prepare/docker, when: "container_runtime == 'docker'" } 4 | - { role: prepare/containerd, when: "container_runtime == 'containerd'" } -------------------------------------------------------------------------------- /91-add-worker-03-kubernetes-component.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - prepare/kubernetes -------------------------------------------------------------------------------- /91-add-worker-04-load-balancer.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - load-balancer -------------------------------------------------------------------------------- /91-add-worker-05-certificates.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - prepare/kube-certificates -------------------------------------------------------------------------------- /91-add-worker-06-kubernetes-worker.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - kube-worker -------------------------------------------------------------------------------- /91-add-worker-07-network.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - plugins/network-plugins/network-prepare 7 | 8 | - hosts: kube-master[0] 9 | gather_facts: false 10 | roles: 11 | - plugins/network-plugins/network-deploy 12 | -------------------------------------------------------------------------------- /91-add-worker-08-post.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | gather_facts: false 3 | roles: 4 | - post 5 | 6 | - hosts: new-worker 7 | gather_facts: false 8 | tasks: 9 | - block: 10 | - name: Get ClusterIP of kube-dns 11 | shell: "{{ bin_dir }}/kubectl get svc -n kube-system | grep kube-dns | grep -v upstream | awk '{ print $3 }'" 12 | register: kube_dns_clusterip 13 | run_once: true 14 | 15 | - name: Modify the kubelet config file 16 | lineinfile: 17 | path: "/var/lib/kubelet/config.yaml" 18 | regexp: '- {{ kube_dns_clusterip.stdout }}' 19 | line: "- {{ pillar_local_dns }}" 20 | 21 | - name: Restart kubelet 22 | service: 23 | name: kubelet 24 | state: restarted 25 | when: 26 | - kube_proxy_mode == "ipvs" 27 | 
- enable_dns_cache == "enable" 28 | - component_created_by == "cluster" 29 | 30 | - hosts: kube-master[0] 31 | gather_facts: false 32 | roles: 33 | - { role: plugins/gpu/gpu-operator, when: "container_runtime == 'docker' and enable_gpu == 'enable' and component_created_by == 'cluster'" } -------------------------------------------------------------------------------- /91-add-worker.yml: -------------------------------------------------------------------------------- 1 | - hosts: new-worker 2 | roles: 3 | - prepare/repository 4 | - prepare/base 5 | - chrony 6 | 7 | - hosts: 8 | - kube-master 9 | - kube-worker 10 | - new-worker 11 | roles: 12 | - prepare/nameserver 13 | 14 | - hosts: new-worker 15 | roles: 16 | - { role: prepare/docker, when: "container_runtime == 'docker'" } 17 | - { role: prepare/containerd, when: "container_runtime == 'containerd'" } 18 | 19 | - hosts: new-worker 20 | roles: 21 | - prepare/kubernetes 22 | 23 | - hosts: new-worker 24 | roles: 25 | - load-balancer 26 | 27 | - hosts: 28 | - kube-master 29 | - kube-worker 30 | - new-worker 31 | roles: 32 | - prepare/kube-certificates 33 | 34 | - hosts: new-worker 35 | gather_facts: false 36 | roles: 37 | - kube-worker 38 | 39 | - hosts: 40 | - kube-master 41 | - kube-worker 42 | - new-worker 43 | gather_facts: false 44 | roles: 45 | - plugins/network-plugins/network-prepare 46 | 47 | - hosts: kube-master[0] 48 | gather_facts: false 49 | roles: 50 | - plugins/network-plugins/network-deploy 51 | 52 | - hosts: new-worker 53 | gather_facts: false 54 | roles: 55 | - post 56 | 57 | - hosts: new-worker 58 | gather_facts: false 59 | tasks: 60 | - block: 61 | - name: Get ClusterIP of kube-dns 62 | shell: "{{ bin_dir }}/kubectl get svc -n kube-system | grep kube-dns | grep -v upstream | awk '{ print $3 }'" 63 | register: kube_dns_clusterip 64 | run_once: true 65 | 66 | - name: Modify the kubelet config file 67 | lineinfile: 68 | path: "/var/lib/kubelet/config.yaml" 69 | regexp: '- {{ kube_dns_clusterip.stdout }}' 70 | line: "- {{ pillar_local_dns }}" 71 | 72 | - name: Restart kubelet 73 | service: 74 | name: kubelet 75 | state: restarted 76 | when: 77 | - kube_proxy_mode == "ipvs" 78 | - enable_dns_cache == "enable" 79 | - component_created_by == "cluster" 80 | 81 | - hosts: kube-master[0] 82 | gather_facts: false 83 | roles: 84 | - { role: plugins/gpu/gpu-operator, when: "container_runtime == 'docker' and enable_gpu == 'enable' and component_created_by == 'cluster'" } 85 | -------------------------------------------------------------------------------- /92-upgrade-cluster.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - upgrade -------------------------------------------------------------------------------- /93-certificates-renew.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - etcd 3 | - kube-master 4 | roles: 5 | - prepare/etcd-certificates 6 | - prepare/kube-certificates -------------------------------------------------------------------------------- /94-backup-cluster.yml: -------------------------------------------------------------------------------- 1 | - hosts: etcd[0] 2 | gather_facts: false 3 | roles: 4 | - backup -------------------------------------------------------------------------------- /95-restore-cluster.yml: -------------------------------------------------------------------------------- 1 | - hosts: etcd 2 | gather_facts: false 3 | roles: 4 
| - restore -------------------------------------------------------------------------------- /96-remove-worker.yml: -------------------------------------------------------------------------------- 1 | - hosts: del-worker 2 | gather_facts: false 3 | roles: 4 | - remove -------------------------------------------------------------------------------- /97-reset-worker.yml: -------------------------------------------------------------------------------- 1 | - hosts: del-worker 2 | roles: 3 | - reset -------------------------------------------------------------------------------- /99-reset-cluster.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - kube-master 3 | - kube-worker 4 | - new-worker 5 | roles: 6 | - reset -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # KubeOperator Ansible Scripts 2 | 3 | [KubeOperator](https://github.com/KubeOperator/KubeOperator) is an open-source, lightweight Kubernetes distribution focused on helping enterprises plan, deploy, and operate production-grade K8s clusters. 4 | 5 | This project holds the contents of the KubeOperator installation package, including KubeOperator's installation scripts, default configuration files, and so on. 6 | 7 | ## Install the netaddr module 8 | ``` 9 | pip install --no-cache-dir netaddr==0.7.19 -i https://mirrors.aliyun.com/pypi/simple/ 10 | ``` 11 | 12 | ## Run the installation 13 | ``` 14 | ansible-playbook -i hosts.hostname.ini -e @variables.yml 90-init-cluster.yml 15 | ``` 16 | 17 | ## Feedback 18 | 19 | If you run into any problems while using it, or have further requirements to report, please file a GitHub issue in the [main KubeOperator repository](https://github.com/KubeOperator/KubeOperator/issues) 20 | -------------------------------------------------------------------------------- /roles/backup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Privilege escalation 2 | ansible_become: true 3 | -------------------------------------------------------------------------------- /roles/backup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create etcd backup directory 2 | file: 3 | name: "/etc/kubernetes/backup/etcd" 4 | state: directory 5 | 6 | - name: Backup etcd database 7 | shell: "cd /etc/kubernetes/backup/etcd && \ 8 | ETCDCTL_API=3 {{ bin_dir }}/etcdctl snapshot save {{ etcd_snapshot_name }}" 9 | 10 | - name: Fetch backup file 11 | fetch: 12 | src: "/etc/kubernetes/backup/etcd/{{ etcd_snapshot_name }}" 13 | dest: "/var/ko/data/backup/{{ cluster_name }}/{{ etcd_snapshot_name }}" 14 | flat: yes 15 | 16 | - name: Remove etcd temp backup file 17 | file: 18 | name: "/etc/kubernetes/backup/etcd/{{ etcd_snapshot_name }}" 19 | state: absent -------------------------------------------------------------------------------- /roles/chrony/defaults/main.yml: -------------------------------------------------------------------------------- 1 | local_network: "0.0.0.0/0" -------------------------------------------------------------------------------- /roles/chrony/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: prepare some dirs 2 | file: name={{ item }} state=directory 3 | with_items: 4 | - "/etc/chrony" 5 | - "/var/lib/chrony" 6 | - "/var/log/chrony" 7 | 8 | - block: 9 | - name: CentOS | Remove ntp server 10 | yum: 11 | name: ntp 12 | state: absent 13 | ignore_errors: true 14 | 15 | - name: CentOS | Install chrony 16 | yum: 17 | name: chrony 18 | state: present 19 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 20 | 21 | - block: 22 | - name: 
Debian | Remove ntp server 23 | apt: 24 | name: ntp 25 | state: absent 26 | ignore_errors: true 27 | 28 | - name: Debian | Install chrony 29 | apt: 30 | name: chrony 31 | state: present 32 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 33 | 34 | - block: 35 | - block: 36 | - name: CentOS | Config chrony server 37 | template: 38 | src: server.conf.j2 39 | dest: /etc/chrony.conf 40 | 41 | - name: CentOS | Start chrony server 42 | service: 43 | name: chronyd 44 | state: restarted 45 | enabled: yes 46 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 47 | 48 | - block: 49 | - name: Debian | Config chrony server 50 | template: 51 | src: server.conf.j2 52 | dest: /etc/chrony/chrony.conf 53 | 54 | - name: Debian | Start chrony server 55 | service: 56 | name: chrony 57 | state: restarted 58 | enabled: yes 59 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 60 | when: inventory_hostname == groups['chrony'][0] 61 | 62 | - block: 63 | - block: 64 | - name: CentOS | Config chrony client 65 | template: 66 | src: client.conf.j2 67 | dest: /etc/chrony.conf 68 | 69 | - name: CentOS | Start chrony client 70 | service: 71 | name: chronyd 72 | state: restarted 73 | enabled: yes 74 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 75 | 76 | - block: 77 | - name: Debian | Config chrony client 78 | template: 79 | src: client.conf.j2 80 | dest: /etc/chrony/chrony.conf 81 | 82 | - name: Debian | Start chrony client 83 | service: 84 | name: chrony 85 | state: restarted 86 | enabled: yes 87 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 88 | when: 'inventory_hostname != groups.chrony[0]' 89 | 90 | - name: Wait for Chronyd service to start 91 | shell: "systemctl is-active chronyd.service" 92 | register: svc_status 93 | until: '"active" in svc_status.stdout' 94 | retries: 3 95 | delay: 3 96 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 97 | 98 | - name: Wait for Chrony service to start 99 | shell: "systemctl is-active chrony.service" 100 | register: svc_status 101 | until: '"active" in svc_status.stdout' 102 | retries: 3 103 | delay: 3 104 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 105 | 106 | - name: Manually synchronize clocks using Chrony 107 | shell: "chronyc -a makestep" 108 | when: 'inventory_hostname != groups.chrony[0]' 109 | ignore_errors: true 110 | -------------------------------------------------------------------------------- /roles/chrony/templates/client.conf.j2: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | {% for host in groups['chrony'] %} 3 | server {% if hostvars[host]['ansible_ssh_host'] is defined %}{{ hostvars[host]['ansible_ssh_host'] }}{% else %}{{ host }}{% endif %} iburst 4 | {% endfor %} 5 | 6 | # Ignor source level 7 | stratumweight 0 8 | 9 | # Record the rate at which the system clock gains/losses time. 10 | driftfile /var/lib/chrony/drift 11 | 12 | # Allow the system clock to be stepped in the first three updates 13 | # if its offset is larger than 1 second. 14 | makestep 1.0 3 15 | 16 | # Enable kernel synchronization of the real-time clock (RTC). 17 | rtcsync 18 | 19 | # Enable hardware timestamping on all interfaces that support it. 20 | #hwtimestamp * 21 | 22 | # Increase the minimum number of selectable sources required to adjust 23 | # the system clock. 
24 | #minsources 2 25 | 26 | # Allow NTP client access from local network. 27 | allow {{ local_network }} 28 | 29 | # 30 | bindcmdaddress 127.0.0.1 31 | bindcmdaddress ::1 32 | 33 | # Serve time even if not synchronized to a time source. 34 | #local stratum 10 35 | 36 | # Specify file containing keys for NTP authentication. 37 | keyfile /etc/chrony.keys 38 | 39 | # Specify directory for log files. 40 | logdir /var/log/chrony 41 | 42 | # Select which information is logged. 43 | #log measurements statistics tracking 44 | 45 | # 46 | logchange 1 47 | -------------------------------------------------------------------------------- /roles/chrony/templates/server.conf.j2: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | {% if ntp_server is defined and ntp_server %} 3 | {% for server in ntp_server.split(',') %} 4 | server {{ server }} iburst 5 | {% endfor %} 6 | {% endif %} 7 | pool pool.ntp.org iburst 8 | pool 2.debian.pool.ntp.org iburst 9 | 10 | # Ignor source level 11 | stratumweight 0 12 | 13 | # Record the rate at which the system clock gains/losses time. 14 | driftfile /var/lib/chrony/drift 15 | 16 | # Allow the system clock to be stepped in the first three updates 17 | # if its offset is larger than 1 second. 18 | makestep 1.0 3 19 | 20 | # Enable kernel synchronization of the real-time clock (RTC). 21 | rtcsync 22 | 23 | # Enable hardware timestamping on all interfaces that support it. 24 | #hwtimestamp * 25 | 26 | # Increase the minimum number of selectable sources required to adjust 27 | # the system clock. 28 | #minsources 2 29 | 30 | # Allow NTP client access from local network. 31 | allow {{ local_network }} 32 | 33 | # 34 | bindcmdaddress 127.0.0.1 35 | bindcmdaddress ::1 36 | 37 | # Serve time even if not synchronized to a time source. 38 | local stratum 10 39 | 40 | # Specify file containing keys for NTP authentication. 41 | keyfile /etc/chrony.keys 42 | 43 | # Specify directory for log files. 44 | logdir /var/log/chrony 45 | 46 | # Select which information is logged. 
47 | #log measurements statistics tracking 48 | 49 | # 50 | noclientlog 51 | logchange 1 52 | -------------------------------------------------------------------------------- /roles/etcd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 当前节点ip 2 | CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ inventory_hostname }}{% endif %}" 3 | 4 | # etcd 集群间通信的IP和端口, 根据etcd组成员自动生成 5 | INITIAL_CLUSTER: "{% for host in (groups['etcd']|unique) %}{% if hostvars[host]['ansible_ssh_host'] is defined %}etcd-{{ host }}=https://{{ hostvars[host]['ansible_ssh_host'] }}:2380{% else %}etcd-{{ host }}=https://{{ host }}:2380{% endif %}{% if not loop.last %},{% endif %}{% endfor %}" 6 | 7 | # etcd 集群初始状态 new/existing 8 | CLUSTER_STATE: "new" -------------------------------------------------------------------------------- /roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Prepare directory 2 | file: name={{ item }} state=directory 3 | with_items: 4 | - "{{ etcd_data_dir }}" 5 | 6 | - name: Download etcd file 7 | get_url: 8 | validate_certs: no 9 | url: "{{ etcd_download_url }}" 10 | dest: "{{ base_dir }}/" 11 | timeout: "{{ download_timeout_online }}" 12 | tags: upgrade 13 | 14 | - name: Unarchive etcd file 15 | unarchive: 16 | src: "{{ base_dir }}/etcd-{{ etcd_version }}-linux-{{ architectures }}.tar.gz" 17 | dest: "{{ base_dir }}/" 18 | remote_src: yes 19 | tags: upgrade 20 | 21 | - name: Copy etcd file 22 | copy: 23 | src: "{{ base_dir}}/etcd-{{ etcd_version }}-linux-{{ architectures }}/{{ item }}" 24 | dest: "{{ bin_dir }}/" 25 | remote_src: yes 26 | mode: "0755" 27 | with_items: 28 | - etcd 29 | - etcdctl 30 | tags: upgrade 31 | 32 | - block: 33 | - name: Arm64 | Setup system ETCD_UNSUPPORTED_ARCH variable 34 | lineinfile: 35 | dest: "/etc/bashrc" 36 | line: 'export ETCD_UNSUPPORTED_ARCH=arm64' 37 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 38 | 39 | - name: Arm64 | Setup system ETCD_UNSUPPORTED_ARCH variable 40 | lineinfile: 41 | dest: "/etc/bash.bashrc" 42 | line: 'export ETCD_UNSUPPORTED_ARCH=arm64' 43 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 44 | when: architectures == "arm64" 45 | 46 | - name: Create etcd systemd unit file 47 | template: src=etcd.service.j2 dest=/etc/systemd/system/etcd.service 48 | tags: upgrade 49 | 50 | - name: Enable etcd service 51 | shell: systemctl enable etcd 52 | ignore_errors: true 53 | 54 | - name: Start etcd service 55 | shell: systemctl daemon-reload && systemctl restart etcd 56 | tags: upgrade 57 | 58 | - name: Wait etcd service to start 59 | shell: "systemctl status etcd.service|grep Active" 60 | register: etcd_status 61 | until: '"running" in etcd_status.stdout' 62 | retries: 8 63 | delay: 8 64 | tags: upgrade 65 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Etcd Server 3 | After=network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | Documentation=https://github.com/coreos 7 | 8 | [Service] 9 | Type=notify 10 | WorkingDirectory={{ etcd_data_dir }} 11 | {% if architectures != "amd64" -%} 12 | Environment="ETCD_UNSUPPORTED_ARCH={{ architectures }}" 13 | {%- 
endif %} 14 | 15 | ExecStart={{ bin_dir }}/etcd \ 16 | --name=etcd-{{ inventory_hostname }} \ 17 | --cert-file=/etc/kubernetes/pki/etcd/server.crt \ 18 | --key-file=/etc/kubernetes/pki/etcd/server.key \ 19 | --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt \ 20 | --peer-key-file=/etc/kubernetes/pki/etcd/peer.key \ 21 | --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \ 22 | --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \ 23 | --initial-advertise-peer-urls=https://{{ CURRENT_HOST_IP }}:2380 \ 24 | --listen-peer-urls=https://{{ CURRENT_HOST_IP }}:2380 \ 25 | --listen-client-urls=https://{{ CURRENT_HOST_IP }}:2379,http://127.0.0.1:2379 \ 26 | --advertise-client-urls=https://{{ CURRENT_HOST_IP }}:2379 \ 27 | --initial-cluster-token=etcd-cluster-token \ 28 | --initial-cluster={{ INITIAL_CLUSTER }} \ 29 | --initial-cluster-state={{ CLUSTER_STATE }} \ 30 | --data-dir={{ etcd_data_dir }} \ 31 | --snapshot-count={{ etcd_snapshot_count }} \ 32 | --auto-compaction-retention={{ etcd_compaction_retention }} \ 33 | --max-request-bytes={{ etcd_max_request_bytes }} \ 34 | --quota-backend-bytes={{ etcd_quota_backend_bytes }} 35 | Restart=always 36 | RestartSec=15 37 | LimitNOFILE=65536 38 | OOMScoreAdjust=-999 39 | 40 | [Install] 41 | WantedBy=multi-user.target 42 | -------------------------------------------------------------------------------- /roles/helm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | helm_home_dir: "{{ ansible_env.HOME | default('/root') }}/.helm" 2 | helm_deploy_dir: /etc/kubernetes/plugins/helm 3 | helm_namespace: kube-system 4 | tiller_sa: tiller 5 | history_max: 5 6 | # 如果默认官方repo 网络访问不稳定可以使用如下的阿里云镜像repo 7 | #repo_url: https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts -------------------------------------------------------------------------------- /roles/helm/templates/helm-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | # 绑定helm sa到 cluster-admin,这样可以兼容现有需要集群特权的charts 2 | # 3 | {% if helm_namespace not in current_ns.stdout %} 4 | --- 5 | apiVersion: v1 6 | kind: Namespace 7 | metadata: 8 | name: {{ helm_namespace }} 9 | {% endif %} 10 | --- 11 | apiVersion: v1 12 | kind: ServiceAccount 13 | metadata: 14 | name: {{ tiller_sa }} 15 | namespace: {{ helm_namespace }} 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: tiller 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: cluster-admin 25 | subjects: 26 | - kind: ServiceAccount 27 | name: {{ tiller_sa }} 28 | namespace: {{ helm_namespace }} 29 | -------------------------------------------------------------------------------- /roles/kube-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | 4 | # apiserver ip 5 | KUBE_APISERVER_IP: >- 6 | {% if lb_kube_apiserver_ip is not defined %} 7 | 127.0.0.1 8 | {% else %} 9 | {{ lb_kube_apiserver_ip }} 10 | {% endif %} 11 | 12 | KUBERNETES_SERVICE_IP: "{{ kube_service_subnet | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" 13 | CLUSTER_DNS_SERVICE_IP: "{{ kube_service_subnet | ipaddr('net') | ipaddr(10) | ipaddr('address') }}" -------------------------------------------------------------------------------- /roles/kube-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Read kubelet.conf file stat info 2 | stat: 3 | 
path: /etc/kubernetes/kubelet.conf 4 | register: kubelet_conf_stat 5 | 6 | - include_tasks: kubeconfig.yml 7 | when: kubelet_conf_stat.stat.exists -------------------------------------------------------------------------------- /roles/kube-master/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | 4 | #----------------------------------------------- 基础参数 -------------------------------------------------# 5 | # 当前节点ip 6 | CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ inventory_hostname }}{% endif %}" 7 | # apiserver ip 8 | KUBE_APISERVER_IP: >- 9 | {% if lb_kube_apiserver_ip is not defined %} 10 | 127.0.0.1 11 | {% else %} 12 | {{ lb_kube_apiserver_ip }} 13 | {% endif %} 14 | 15 | KUBERNETES_SERVICE_IP: "{{ kube_service_subnet | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" 16 | CLUSTER_DNS_SERVICE_IP: "{{ kube_service_subnet | ipaddr('net') | ipaddr(10) | ipaddr('address') }}" 17 | 18 | ## 存入 Etcd 时的 Secret 进行静态加密 19 | # 仅支持: aescbc, secretbox 或 aesgcm 20 | kube_encryption_algorithm: "aescbc" 21 | # 将Secret数据加密存储到etcd中的配置文件,下面加密码由 head -c 32 /dev/urandom | base64 生成 22 | kube_encrypt_token: "GPG4RC0Vyk7+Mz/niQPttxLIeL4HF96oRCcBRyKNpfM=" 23 | 24 | ## 审计相关配置 25 | # 保留审计日志最大天数 26 | audit_log_maxage: 30 27 | # 保留审计日志最大个数 28 | audit_log_maxbackups: 10 29 | # 保留审计日志最大容量(MB) 30 | audit_log_maxsize: 100 31 | # 审计日志文件挂载在主机上的目录 32 | audit_log_hostpath: /var/log/kubernetes/audit 33 | # 审计策略配置文件路径 34 | audit_policy_file: /etc/kubernetes/config/apiserver-audit-policy.yaml 35 | # 自定义审计日志规则 (替换默认的审计规则) 36 | # audit_policy_custom_rules: | 37 | # - level: None 38 | # users: [] 39 | # verbs: [] 40 | # resources: [] 41 | 42 | # 1.10+ admission plugins 43 | kube_apiserver_enable_admission_plugins: 44 | - NodeRestriction 45 | # - AlwaysPullImages 46 | # - PodSecurityPolicy 47 | 48 | # 1.10+ list of disabled admission plugins 49 | kube_apiserver_disable_admission_plugins: [] 50 | 51 | # kube-controller-manager 标记 kubelet(node) 为不健康的周期 52 | kube_controller_node_monitor_grace_period: 40s 53 | # kube-controller-manager 定期检查 kubelet(node) 状态周期 54 | kube_controller_node_monitor_period: 5s 55 | # kube-controller-manager 判定节点故障,重建 Pod 的超时时间,默认值 5m0s,这里改为了 2m0s 56 | kube_controller_pod_eviction_timeout: 2m0s 57 | # exit 状态的 pod 超过多少会触发 gc,默认值 12500,这里改为了 10 58 | kube_controller_terminated_pod_gc_threshold: 10 59 | 60 | ## Extra args for k8s components passing by kubeadm 61 | kube_kubeadm_apiserver_extra_args: {} 62 | kube_kubeadm_controller_extra_args: {} 63 | kube_kubeadm_scheduler_extra_args: {} 64 | 65 | ## Extra control plane host volume mounts 66 | ## Example: 67 | # apiserver_extra_volumes: 68 | # - name: name 69 | # hostPath: /host/path 70 | # mountPath: /mount/path 71 | # readOnly: true 72 | apiserver_extra_volumes: {} 73 | controller_manager_extra_volumes: {} 74 | scheduler_extra_volumes: {} -------------------------------------------------------------------------------- /roles/kube-master/tasks/kubeadm-config.yml: -------------------------------------------------------------------------------- 1 | - name: Confirm kubeadm version 2 | command: "{{ bin_dir }}/kubeadm version -o short" 3 | register: kubeadm_version_output 4 | 5 | - name: Setup kubeadm api version to v1beta1 6 | set_fact: 7 | kubeadmConfig_api_version: v1beta1 8 | when: 9 | - kubeadm_version_output.stdout is version('v1.13.0', '>=') 10 | - 
kubeadm_version_output.stdout is version('v1.15.0', '<') 11 | 12 | - name: Setup kubeadm api version to v1beta2 13 | set_fact: 14 | kubeadmConfig_api_version: v1beta2 15 | when: kubeadm_version_output.stdout is version('v1.15.0', '>=') 16 | 17 | - name: Create kubeadm configuration file 18 | template: 19 | src: >- 20 | {% if inventory_hostname == groups['kube-master'][0] -%} 21 | kubeadm-controlplane-init.{{ kubeadmConfig_api_version }}.yaml.j2 22 | {%- elif inventory_hostname in groups['kube-master'] -%} 23 | kubeadm-controlplane-join.{{ kubeadmConfig_api_version }}.yaml.j2 24 | {%- else -%} 25 | kubeadm-join.{{ kubeadmConfig_api_version }}.yaml.j2 26 | {%- endif %} 27 | dest: "/etc/kubernetes/kubeadm-config.yaml" 28 | owner: root 29 | mode: 0644 -------------------------------------------------------------------------------- /roles/kube-master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create kubernetes directory 2 | file: 3 | name: "{{ item }}" 4 | state: directory 5 | with_items: 6 | - "{{ kubelet_root_dir }}" 7 | - "{{ audit_policy_file | dirname }}" 8 | - /etc/kubernetes/pki 9 | - /etc/kubernetes/config 10 | - /etc/kubernetes/manifests 11 | - /var/log/kubernetes/audit 12 | - /usr/share/bash-completion/completions 13 | 14 | - name: Read kubelet.conf file stat info 15 | stat: 16 | path: /etc/kubernetes/kubelet.conf 17 | register: kubelet_conf_stat 18 | 19 | # 生成 kubeadm 配置 20 | - include_tasks: kubeadm-config.yml 21 | 22 | - name: Create EncryptionConfiguration configuration file 23 | template: 24 | src: secrets-encryption.yaml.j2 25 | dest: /etc/kubernetes/pki/secrets-encryption.yaml 26 | owner: root 27 | group: root 28 | mode: 0644 29 | 30 | - name: Create apiserver audit policy configuration file 31 | template: 32 | src: apiserver-audit-policy.yaml.j2 33 | dest: "{{ audit_policy_file }}" 34 | owner: root 35 | group: root 36 | mode: 0644 37 | when: 'kubernetes_audit == "yes"' 38 | 39 | # 初始化第一个 master 节点 40 | - include_tasks: master-init.yml 41 | when: 42 | - not kubelet_conf_stat.stat.exists 43 | - inventory_hostname == groups['kube-master'][0] 44 | 45 | # 初始化其他 master 节点 46 | - include_tasks: master-join.yml 47 | when: 48 | - not kubelet_conf_stat.stat.exists 49 | - inventory_hostname != groups['kube-master'][0] 50 | - inventory_hostname in groups['kube-master'] 51 | 52 | - name: Confirm kubelet configuration modify 53 | template: 54 | src: kubelet-config.v1beta1.yaml.j2 55 | dest: /var/lib/kubelet/config.yaml 56 | owner: root 57 | mode: 0644 58 | register: configuration_result 59 | 60 | - name: Restart kubelet service 61 | service: 62 | name: kubelet 63 | state: restarted 64 | enabled: yes 65 | when: configuration_result.changed -------------------------------------------------------------------------------- /roles/kube-master/tasks/master-init.yml: -------------------------------------------------------------------------------- 1 | - name: Confirm kubelet has stopped 2 | service: 3 | name: kubelet 4 | state: stopped 5 | enabled: yes 6 | 7 | - name: Initial the first master node 8 | shell: "{{ bin_dir }}/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml" 9 | 10 | - name: Node config kubeconfig 11 | include_role: 12 | name: kube-config 13 | tasks_from: kubeconfig 14 | 15 | - name: Systemctl daemon-reload 16 | systemd: 17 | daemon_reload: yes 18 | 19 | - name: Restart kubelet service 20 | service: 21 | name: kubelet 22 | state: restarted 23 | enabled: yes 24 | 25 | - name: Waiting for apiserver to running 26 | 
uri: 27 | url: "https://{{ CURRENT_HOST_IP }}:6443/healthz" 28 | validate_certs: no 29 | register: apiserver_result 30 | until: apiserver_result.status == 200 31 | retries: 60 32 | delay: 5 33 | 34 | - name: Waiting for kube-scheduler to running 35 | uri: 36 | url: "https://127.0.0.1:10259/healthz" 37 | validate_certs: no 38 | register: scheduler_result 39 | until: scheduler_result.status == 200 40 | retries: 60 41 | delay: 5 42 | 43 | - name: Waiting for kube-controller-manager to running 44 | uri: 45 | url: "https://127.0.0.1:10257/healthz" 46 | validate_certs: no 47 | register: controller_manager_result 48 | until: controller_manager_result.status == 200 49 | retries: 60 50 | delay: 5 51 | 52 | - name: Create kubelet certificate configuration 53 | template: 54 | src: kubelet-certificates-renewal.yaml.j2 55 | dest: /etc/kubernetes/config/kubelet-certificates-renewal.yaml 56 | owner: root 57 | group: root 58 | mode: 0644 59 | 60 | - name: Authorize kubelet to automatically rotate server certificates 61 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/config/kubelet-certificates-renewal.yaml" 62 | 63 | - block: 64 | - name: Create Pod security policy configuration file 65 | template: 66 | src: pod-security-policy.yaml.j2 67 | dest: /etc/kubernetes/config/pod-security-policy.yaml 68 | owner: root 69 | group: root 70 | mode: 0644 71 | 72 | - name: Configure Pod security policy 73 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/config/pod-security-policy.yaml" 74 | when: '"PodSecurityPolicy" in kube_apiserver_enable_admission_plugins' -------------------------------------------------------------------------------- /roles/kube-master/tasks/master-join.yml: -------------------------------------------------------------------------------- 1 | - name: Confirm kubelet has stopped 2 | service: 3 | name: kubelet 4 | state: stopped 5 | enabled: yes 6 | 7 | - name: Other master node to join the cluster 8 | shell: > 9 | {{ bin_dir }}/kubeadm join --config /etc/kubernetes/kubeadm-config.yaml 10 | --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests 11 | 12 | - include_tasks: "{{ (role_path + '/../kube-config/tasks/kubeconfig.yml') | realpath }}" 13 | 14 | - name: Systemctl daemon-reload 15 | systemd: 16 | daemon_reload: yes 17 | 18 | - name: Restart kubelet service 19 | service: 20 | name: kubelet 21 | state: restarted 22 | enabled: yes 23 | 24 | - name: Waiting for apiserver to running 25 | uri: 26 | url: "https://{{ CURRENT_HOST_IP }}:6443/healthz" 27 | validate_certs: no 28 | register: apiserver_result 29 | until: apiserver_result.status == 200 30 | retries: 60 31 | delay: 5 32 | 33 | - name: Waiting for kube-scheduler to running 34 | uri: 35 | url: "https://127.0.0.1:10259/healthz" 36 | validate_certs: no 37 | register: scheduler_result 38 | until: scheduler_result.status == 200 39 | retries: 60 40 | delay: 5 41 | 42 | - name: Waiting for kube-controller-manager to running 43 | uri: 44 | url: "https://127.0.0.1:10257/healthz" 45 | validate_certs: no 46 | register: controller_manager_result 47 | until: controller_manager_result.status == 200 48 | retries: 60 49 | delay: 5 -------------------------------------------------------------------------------- /roles/kube-master/templates/kubelet-certificates-renewal.yaml.j2: -------------------------------------------------------------------------------- 1 | # A ClusterRole which instructs the CSR approver to approve a node requesting a 2 | # serving cert matching its client cert. 
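# Applied by roles/kube-master/tasks/master-init.yml ("Authorize kubelet to automatically rotate
# server certificates"). Works together with rotateCertificates: true and the
# RotateKubeletServerCertificate feature gate set in kubelet-config.v1beta1.yaml.j2; the intent is
# that serving-certificate renewal CSRs from the system:nodes group get approved automatically
# rather than by hand.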
3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver 7 | rules: 8 | - apiGroups: ["certificates.k8s.io"] 9 | resources: ["certificatesigningrequests/selfnodeserver"] 10 | verbs: ["create"] 11 | --- 12 | kind: ClusterRoleBinding 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | metadata: 15 | name: kubeadm:node-autoapprove-certificate-renewal 16 | subjects: 17 | - kind: Group 18 | name: system:nodes 19 | apiGroup: rbac.authorization.k8s.io 20 | roleRef: 21 | kind: ClusterRole 22 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver 23 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /roles/kube-master/templates/kubelet-config.v1beta1.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kubelet.config.k8s.io/v1beta1 2 | kind: KubeletConfiguration 3 | address: 0.0.0.0 4 | authentication: 5 | anonymous: 6 | enabled: false 7 | webhook: 8 | cacheTTL: 2m0s 9 | enabled: true 10 | x509: 11 | clientCAFile: /etc/kubernetes/pki/ca.crt 12 | authorization: 13 | mode: Webhook 14 | webhook: 15 | cacheAuthorizedTTL: 5m0s 16 | cacheUnauthorizedTTL: 30s 17 | cgroupDriver: {{ cgroup_driver }} 18 | cgroupsPerQOS: true 19 | clusterDNS: 20 | - {{ CLUSTER_DNS_SERVICE_IP }} 21 | clusterDomain: {{ kube_dns_domain }} 22 | configMapAndSecretChangeDetectionStrategy: Watch 23 | containerLogMaxFiles: 5 24 | containerLogMaxSize: 10Mi 25 | contentType: application/vnd.kubernetes.protobuf 26 | cpuCFSQuota: true 27 | cpuCFSQuotaPeriod: 100ms 28 | cpuManagerPolicy: none 29 | cpuManagerReconcilePeriod: 10s 30 | enableControllerAttachDetach: true 31 | enableDebuggingHandlers: true 32 | enforceNodeAllocatable: 33 | - pods 34 | eventBurst: 10 35 | eventRecordQPS: 5 36 | evictionHard: 37 | imagefs.available: {{ eviction_hard_imagefs_available }} 38 | memory.available: {{ eviction_hard_memory_available }} 39 | nodefs.available: {{ eviction_hard_nodefs_available }} 40 | nodefs.inodesFree: {{ eviction_hard_nodefs_inodes_free }} 41 | kubeReserved: 42 | cpu: {{ kube_cpu_reserved }} 43 | memory: {{ kube_memory_reserved|regex_replace('Mi', 'M') }} 44 | {% if system_reserved_enabled is defined and system_reserved_enabled %} 45 | systemReserved: 46 | cpu: {{ system_cpu_reserved|default('500m') }} 47 | memory: {{ system_memory_reserved|default('512M')|regex_replace('Mi', 'M') }} 48 | ephemeral-storage: {{ system_ephemeral_storage_reserved|default('10Gi')|regex_replace('Gi', 'G') }} 49 | {% endif %} 50 | evictionPressureTransitionPeriod: 5m0s 51 | failSwapOn: true 52 | featureGates: 53 | RotateKubeletServerCertificate: true 54 | fileCheckFrequency: 20s 55 | hairpinMode: promiscuous-bridge 56 | healthzBindAddress: 127.0.0.1 57 | healthzPort: 10248 58 | httpCheckFrequency: 20s 59 | imageGCHighThresholdPercent: 85 60 | imageGCLowThresholdPercent: 80 61 | imageMinimumGCAge: 2m0s 62 | iptablesDropBit: 15 63 | iptablesMasqueradeBit: 14 64 | kubeAPIBurst: 10 65 | kubeAPIQPS: 5 66 | makeIPTablesUtilChains: true 67 | maxOpenFiles: 1000000 68 | maxPods: {{ kube_max_pods }} 69 | nodeLeaseDurationSeconds: 40 70 | nodeStatusReportFrequency: 1m0s 71 | nodeStatusUpdateFrequency: 10s 72 | oomScoreAdj: -999 73 | podPidsLimit: -1 74 | port: 10250 75 | protectKernelDefaults: true 76 | readOnlyPort: 0 77 | registryBurst: 10 78 | registryPullQPS: 5 79 | {% if ansible_distribution == "Ubuntu" %} 80 | resolvConf: 
/run/systemd/resolve/resolv.conf 81 | {% else %} 82 | resolvConf: /etc/resolv.conf 83 | {% endif %} 84 | rotateCertificates: true 85 | runtimeRequestTimeout: 2m0s 86 | serializeImagePulls: true 87 | staticPodPath: /etc/kubernetes/manifests 88 | streamingConnectionIdleTimeout: 4h0m0s 89 | syncFrequency: 1m0s 90 | tlsCertFile: /var/lib/kubelet/pki/kubelet.crt 91 | tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key 92 | volumeStatsAggPeriod: 1m0s -------------------------------------------------------------------------------- /roles/kube-master/templates/secrets-encryption.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - {{ kube_encryption_algorithm }}: 8 | keys: 9 | - name: key 10 | secret: {{ kube_encrypt_token }} 11 | - identity: {} -------------------------------------------------------------------------------- /roles/kube-worker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Run with privilege escalation 2 | ansible_become: true 3 | 4 | # Current node IP 5 | CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ inventory_hostname }}{% endif %}" 6 | # apiserver IP 7 | KUBE_APISERVER_IP: >- 8 | {% if lb_kube_apiserver_ip is not defined %} 9 | 127.0.0.1 10 | {% else %} 11 | {{ lb_kube_apiserver_ip }} 12 | {% endif %} 13 | 14 | CLUSTER_DNS_SERVICE_IP: "{{ kube_service_subnet | ipaddr('net') | ipaddr(10) | ipaddr('address') }}" 15 | -------------------------------------------------------------------------------- /roles/kube-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create kubernetes directory 2 | file: 3 | name: "{{ item }}" 4 | state: directory 5 | with_items: 6 | - "{{ kubelet_root_dir }}" 7 | - /etc/kubernetes 8 | - /usr/share/bash-completion/completions 9 | 10 | - name: Read kubelet.conf file stat info 11 | stat: 12 | path: /etc/kubernetes/kubelet.conf 13 | register: kubelet_conf_stat 14 | 15 | - include_tasks: "{{ (role_path + '/../kube-master/tasks/kubeadm-config.yml') | realpath }}" 16 | when: 17 | - inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 18 | - inventory_hostname not in groups['kube-master'] 19 | 20 | - block: 21 | - name: Ensure kubelet is stopped 22 | service: 23 | name: kubelet 24 | state: stopped 25 | enabled: yes 26 | 27 | - name: Join the worker node to the cluster 28 | shell: > 29 | {{ bin_dir }}/kubeadm join --config /etc/kubernetes/kubeadm-config.yaml 30 | --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests 31 | when: 32 | - inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 33 | - inventory_hostname not in groups['kube-master'] 34 | - not kubelet_conf_stat.stat.exists 35 | 36 | - block: 37 | - name: Update the kubelet configuration 38 | template: 39 | src: kubelet-config.v1beta1.yaml.j2 40 | dest: /var/lib/kubelet/config.yaml 41 | owner: root 42 | mode: 0644 43 | register: configuration_result 44 | 45 | - name: Restart kubelet service 46 | service: 47 | name: kubelet 48 | state: restarted 49 | enabled: yes 50 | when: configuration_result.changed 51 | when: 52 | - inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 53 | - inventory_hostname not in groups['kube-master'] 54 | 55 | - name: Cancel the master node taint in the worker group 56 | shell: >
{{ bin_dir }}/kubectl taint nodes {{inventory_hostname}} node-role.kubernetes.io/master='':NoSchedule --overwrite && 58 | {{ bin_dir }}/kubectl taint nodes {{inventory_hostname}} node-role.kubernetes.io/master- 59 | delegate_to: "{{ groups['kube-master'][0] }}" 60 | ignore_errors: true 61 | when: 62 | - inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 63 | - inventory_hostname in groups['kube-master'] 64 | 65 | - name: Create kubeconfig directory 66 | file: 67 | name: "{{ item }}" 68 | state: directory 69 | with_items: 70 | - "{{ ansible_env.PWD }}/.kube" 71 | - "{{ ansible_env.HOME }}/.kube" 72 | 73 | - name: Copy kubeconfig file to .kube directory 74 | copy: 75 | src: "{{ playbook_dir }}/{{ cluster_name }}/kubeconfig/config" 76 | dest: "{{ item }}/config" 77 | mode: 0600 78 | with_items: 79 | - "{{ ansible_env.PWD | default('/root') }}/.kube" 80 | - "{{ ansible_env.HOME | default('/root') }}/.kube" 81 | 82 | - name: Set the permissions for the kubeconfig file 83 | file: 84 | path: "{{ ansible_env.PWD | default('/root') }}/.kube/config" 85 | owner: "{{ ansible_env.SUDO_USER | default('root') }}" 86 | mode: '0600' -------------------------------------------------------------------------------- /roles/kube-worker/templates/kubelet-config.v1beta1.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kubelet.config.k8s.io/v1beta1 2 | kind: KubeletConfiguration 3 | address: 0.0.0.0 4 | authentication: 5 | anonymous: 6 | enabled: false 7 | webhook: 8 | cacheTTL: 2m0s 9 | enabled: true 10 | x509: 11 | clientCAFile: /etc/kubernetes/pki/ca.crt 12 | authorization: 13 | mode: Webhook 14 | webhook: 15 | cacheAuthorizedTTL: 5m0s 16 | cacheUnauthorizedTTL: 30s 17 | cgroupDriver: {{ cgroup_driver }} 18 | cgroupsPerQOS: true 19 | clusterDNS: 20 | - {{ CLUSTER_DNS_SERVICE_IP }} 21 | clusterDomain: {{ kube_dns_domain }} 22 | configMapAndSecretChangeDetectionStrategy: Watch 23 | containerLogMaxFiles: 5 24 | containerLogMaxSize: 10Mi 25 | contentType: application/vnd.kubernetes.protobuf 26 | cpuCFSQuota: true 27 | cpuCFSQuotaPeriod: 100ms 28 | cpuManagerPolicy: none 29 | cpuManagerReconcilePeriod: 10s 30 | enableControllerAttachDetach: true 31 | enableDebuggingHandlers: true 32 | enforceNodeAllocatable: 33 | - pods 34 | eventBurst: 10 35 | eventRecordQPS: 5 36 | evictionHard: 37 | imagefs.available: {{ eviction_hard_imagefs_available }} 38 | memory.available: {{ eviction_hard_memory_available }} 39 | nodefs.available: {{ eviction_hard_nodefs_available }} 40 | nodefs.inodesFree: {{ eviction_hard_nodefs_inodes_free }} 41 | kubeReserved: 42 | cpu: {{ kube_cpu_reserved }} 43 | memory: {{ kube_memory_reserved|regex_replace('Mi', 'M') }} 44 | {% if system_reserved_enabled is defined and system_reserved_enabled %} 45 | systemReserved: 46 | cpu: {{ system_cpu_reserved|default('500m') }} 47 | memory: {{ system_memory_reserved|default('512M')|regex_replace('Mi', 'M') }} 48 | ephemeral-storage: {{ system_ephemeral_storage_reserved|default('10Gi')|regex_replace('Gi', 'G') }} 49 | {% endif %} 50 | evictionPressureTransitionPeriod: 5m0s 51 | failSwapOn: true 52 | featureGates: 53 | RotateKubeletServerCertificate: true 54 | fileCheckFrequency: 20s 55 | hairpinMode: promiscuous-bridge 56 | healthzBindAddress: 127.0.0.1 57 | healthzPort: 10248 58 | httpCheckFrequency: 20s 59 | imageGCHighThresholdPercent: 85 60 | imageGCLowThresholdPercent: 80 61 | imageMinimumGCAge: 2m0s 62 | iptablesDropBit: 15 63 | iptablesMasqueradeBit: 14 64 | kubeAPIBurst: 10 65 | 
kubeAPIQPS: 5 66 | makeIPTablesUtilChains: true 67 | maxOpenFiles: 1000000 68 | maxPods: {{ kube_max_pods }} 69 | nodeLeaseDurationSeconds: 40 70 | nodeStatusReportFrequency: 1m0s 71 | nodeStatusUpdateFrequency: 10s 72 | oomScoreAdj: -999 73 | podPidsLimit: -1 74 | port: 10250 75 | protectKernelDefaults: true 76 | readOnlyPort: 0 77 | registryBurst: 10 78 | registryPullQPS: 5 79 | {% if ansible_distribution == "Ubuntu" %} 80 | resolvConf: /run/systemd/resolve/resolv.conf 81 | {% else %} 82 | resolvConf: /etc/resolv.conf 83 | {% endif %} 84 | rotateCertificates: true 85 | runtimeRequestTimeout: 2m0s 86 | serializeImagePulls: true 87 | staticPodPath: /etc/kubernetes/manifests 88 | streamingConnectionIdleTimeout: 4h0m0s 89 | syncFrequency: 1m0s 90 | tlsCertFile: /var/lib/kubelet/pki/kubelet.crt 91 | tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key 92 | volumeStatsAggPeriod: 1m0s -------------------------------------------------------------------------------- /roles/load-balancer/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Run with privilege escalation 2 | ansible_become: true 3 | 4 | # haproxy stats bind port 5 | lb_haproxy_stats_bind_address: 9090 6 | # haproxy stats URI 7 | lb_haproxy_stats_uri: "/stats" 8 | # haproxy stats auto-refresh interval (seconds) 9 | lb_haproxy_stats_refresh: 10 10 | # haproxy stats username 11 | lb_haproxy_stats_user: "admin" 12 | # haproxy stats password 13 | lb_haproxy_stats_password: "admin" 14 | # haproxy load-balancing algorithm; common options: 15 | # "roundrobin": weighted round-robin across servers 16 | # "leastconn": server with the fewest active connections 17 | # "source": hash of the request source IP address 18 | # "uri": hash of the request URI 19 | lb_haproxy_balance_alg: "roundrobin" 20 | 21 | # VRRP router ID used to tell multiple instances apart; must be unique within a subnet, valid range 0-255 22 | # Since this project sends VRRP packets in unicast mode, a duplicate ROUTER_ID on the same subnet does no harm 23 | lb_keepalived_router_id: 222 24 | 25 | ssh_host: "{% if node_name_rule == 'hostname' %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ inventory_hostname }}{% endif %}" 26 | -------------------------------------------------------------------------------- /roles/load-balancer/tasks/internal.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: CentOS | Uninstall haproxy 3 | yum: 4 | name: haproxy 5 | state: absent 6 | 7 | - name: CentOS | Install haproxy 8 | yum: 9 | name: haproxy 10 | state: present 11 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 12 | 13 | - block: 14 | - name: Debian | Uninstall haproxy 15 | apt: 16 | name: haproxy 17 | state: absent 18 | 19 | - name: Debian | Install haproxy 20 | apt: 21 | name: haproxy 22 | state: present 23 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 24 | 25 | - name: Create haproxy directory 26 | file: name=/etc/haproxy state=directory 27 | 28 | - name: Configure haproxy 29 | template: 30 | src: haproxy/haproxy.cfg.j2 31 | dest: /etc/haproxy/haproxy.cfg 32 | 33 | - name: Systemctl daemon-reload 34 | systemd: 35 | daemon_reload: yes 36 | 37 | - name: Restart and enable haproxy service 38 | service: 39 | name: haproxy 40 | state: restarted 41 | enabled: yes 42 | 43 | - name: Wait for haproxy to be running 44 | shell: "systemctl status haproxy.service|grep Active" 45 | register: haproxy_status 46 | until: '"running" in haproxy_status.stdout' 47 | retries: 8 48 | delay: 2 49 | -------------------------------------------------------------------------------- /roles/load-balancer/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: external.yml 2 | when: lb_mode ==
"external" 3 | 4 | - include_tasks: internal.yml 5 | when: lb_mode == "internal" -------------------------------------------------------------------------------- /roles/load-balancer/templates/haproxy/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local1 warning 3 | chroot /var/lib/haproxy 4 | user haproxy 5 | group haproxy 6 | daemon 7 | nbproc 1 8 | 9 | defaults 10 | log global 11 | timeout connect 5s 12 | timeout client 10m 13 | timeout server 10m 14 | 15 | listen kube-master 16 | bind :{{ lb_kube_apiserver_port }} 17 | mode tcp 18 | option tcplog 19 | option dontlognull 20 | option dontlog-normal 21 | balance {{ lb_haproxy_balance_alg }} 22 | {% for host in groups['kube-master'] %} 23 | server {{ host }} {% if hostvars[host]['ansible_ssh_host'] is defined %}{{ hostvars[host]['ansible_ssh_host'] }}{% else %}{{ host }}{% endif %}:6443 check check-ssl verify none 24 | {% endfor %} -------------------------------------------------------------------------------- /roles/load-balancer/templates/keepalived-backup.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | router_id lb-backup-{{ ssh_host }} 3 | script_user root 4 | } 5 | 6 | vrrp_script check-haproxy { 7 | script "/usr/bin/killall -0 haproxy" 8 | interval 5 9 | weight -60 10 | } 11 | 12 | vrrp_instance VI-kube_master { 13 | state BACKUP 14 | priority {{ 119 | random(61, 1) }} 15 | unicast_src_ip {{ ssh_host }} 16 | unicast_peer { 17 | {% for h in groups['ex_lb'] %} 18 | {% if h != inventory_hostname %} 19 | {% if node_name_rule == 'hostname' %} 20 | {{ hostvars[h]['ansible_ssh_host'] }} 21 | {% else %} 22 | {{ h }} 23 | {% endif %} 24 | {% endif %} 25 | {% endfor %} 26 | } 27 | dont_track_primary 28 | interface {{ LB_IF }} 29 | virtual_router_id {{ lb_keepalived_router_id }} 30 | advert_int 3 31 | track_script { 32 | check-haproxy 33 | } 34 | virtual_ipaddress { 35 | {{ lb_kube_apiserver_ip }} 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /roles/load-balancer/templates/keepalived-master.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | router_id lb-master-{{ ssh_host }} 3 | script_user root 4 | } 5 | 6 | vrrp_script check-haproxy { 7 | script "/usr/bin/killall -0 haproxy" 8 | interval 5 9 | weight -60 10 | } 11 | 12 | vrrp_instance VI-kube_master { 13 | state MASTER 14 | priority 120 15 | unicast_src_ip {{ ssh_host }} 16 | unicast_peer { 17 | {% for h in groups['ex_lb'] %} 18 | {% if h != inventory_hostname %} 19 | {% if node_name_rule == 'hostname' %} 20 | {{ hostvars[h]['ansible_ssh_host'] }} 21 | {% else %} 22 | {{ h }} 23 | {% endif %} 24 | {% endif %} 25 | {% endfor %} 26 | } 27 | dont_track_primary 28 | interface {{ LB_IF }} 29 | virtual_router_id {{ lb_keepalived_router_id }} 30 | advert_int 3 31 | track_script { 32 | check-haproxy 33 | } 34 | virtual_ipaddress { 35 | {{ lb_kube_apiserver_ip }} 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/defaults/main.yml: -------------------------------------------------------------------------------- 1 | cinder_csi_controller_replicas: 1 -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/tasks/disable.yml: -------------------------------------------------------------------------------- 
1 | - block: 2 | - name: Cinder | Prepare Cinder directory 3 | file: 4 | name: "{{ kube_config_dir }}/plugins/storage-plugin/cinder" 5 | state: directory 6 | 7 | - name: Cinder | Generate Cinder cloud-config 8 | template: 9 | src: "cinder-csi-cloud-config.j2" 10 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/cinder/cinder_cloud_config" 11 | mode: 0640 12 | 13 | - name: Cinder | Get base64 cloud-config 14 | slurp: 15 | src: "{{ kube_config_dir }}/plugins/storage-plugin/cinder/cinder_cloud_config" 16 | register: cloud_config_secret 17 | tags: cinder-csi-driver 18 | 19 | - name: Cinder | Generate Manifests 20 | template: 21 | src: "{{ item.file }}.j2" 22 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/cinder/{{ item.file }}" 23 | with_items: 24 | - {name: cinder-csi-driver, file: cinder-csi-driver.yml} 25 | - {name: cinder-csi-cloud-config-secret, file: cinder-csi-cloud-config-secret.yml} 26 | - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin-rbac.yml} 27 | - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin.yml} 28 | - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin-rbac.yml} 29 | - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml} 30 | - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml} 31 | register: cinder_csi_manifests 32 | 33 | - name: Cinder CSI Driver | Delete Manifests 34 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/cinder/{{ item.item.file }}" 35 | with_items: 36 | - "{{ cinder_csi_manifests.results }}" 37 | when: 38 | - not item is skipped 39 | loop_control: 40 | label: "{{ item.item.file }}" 41 | when: architectures == 'amd64' 42 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Cinder | Prepare Cinder directory 3 | file: 4 | name: "{{ kube_config_dir }}/plugins/storage-plugin/cinder" 5 | state: directory 6 | 7 | - name: Cinder | Generate Cinder cloud-config 8 | template: 9 | src: "cinder-csi-cloud-config.j2" 10 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/cinder/cinder_cloud_config" 11 | mode: 0640 12 | 13 | - name: Cinder | Get base64 cloud-config 14 | slurp: 15 | src: "{{ kube_config_dir }}/plugins/storage-plugin/cinder/cinder_cloud_config" 16 | register: cloud_config_secret 17 | tags: cinder-csi-driver 18 | 19 | - name: Cinder | Generate Manifests 20 | template: 21 | src: "{{ item.file }}.j2" 22 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/cinder/{{ item.file }}" 23 | with_items: 24 | - {name: cinder-csi-driver, file: cinder-csi-driver.yml} 25 | - {name: cinder-csi-cloud-config-secret, file: cinder-csi-cloud-config-secret.yml} 26 | - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin-rbac.yml} 27 | - {name: cinder-csi-controllerplugin, file: cinder-csi-controllerplugin.yml} 28 | - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin-rbac.yml} 29 | - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml} 30 | - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml} 31 | register: cinder_csi_manifests 32 | 33 | - name: Cinder CSI Driver | Apply Manifests 34 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/cinder/{{ item.item.file }}" 35 | with_items: 36 | - "{{ cinder_csi_manifests.results }}" 37 | when: 38 | - not item is 
skipped 39 | loop_control: 40 | label: "{{ item.item.file }}" 41 | when: architectures == 'amd64' 42 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_cinder_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_cinder_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/templates/cinder-csi-cloud-config-secret.yml.j2: -------------------------------------------------------------------------------- 1 | # This YAML file contains secret objects, 2 | # which are necessary to run csi cinder plugin. 3 | 4 | kind: Secret 5 | apiVersion: v1 6 | metadata: 7 | name: cloud-config 8 | namespace: {{ provisioner_namespace }} 9 | data: 10 | cloud.conf: {{ cloud_config_secret.content }} 11 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/templates/cinder-csi-cloud-config.j2: -------------------------------------------------------------------------------- 1 | [Global] 2 | auth-url = "{{ cinder_auth_url }}" 3 | username = "{{ cinder_username }}" 4 | password = "{{ cinder_password }}" 5 | region = "{{ cinder_region }}" 6 | tenant-name = "{{ cinder_tenant_name }}" 7 | domain-name = "{{ cinder_domain_name }}" 8 | 9 | [BlockStorage] 10 | {% if cinder_blockstorage_version is defined and cinder_blockstorage_version != "" %} 11 | bs-version={{ cinder_blockstorage_version }} 12 | {% endif %} 13 | {% if node_volume_attach_limit is defined and node_volume_attach_limit != "" %} 14 | node-volume-attach-limit="{{ node_volume_attach_limit }}" 15 | {% endif %} 16 | 17 | [Metadata] 18 | search-order = metadataService,configDrive -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/templates/cinder-csi-driver.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: cinder.csi.openstack.org 5 | spec: 6 | attachRequired: true 7 | podInfoOnMount: true 8 | volumeLifecycleModes: 9 | - Persistent 10 | - Ephemeral 11 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/templates/cinder-csi-nodeplugin-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | # This YAML defines all API objects to create RBAC roles for csi node plugin. 
2 | 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: csi-cinder-node-sa 7 | namespace: {{ provisioner_namespace }} 8 | --- 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | metadata: 12 | name: csi-nodeplugin-role 13 | rules: 14 | - apiGroups: [""] 15 | resources: ["events"] 16 | verbs: ["get", "list", "watch", "create", "update", "patch"] 17 | 18 | --- 19 | kind: ClusterRoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: csi-nodeplugin-binding 23 | subjects: 24 | - kind: ServiceAccount 25 | name: csi-cinder-node-sa 26 | namespace: {{ provisioner_namespace }} 27 | roleRef: 28 | kind: ClusterRole 29 | name: csi-nodeplugin-role 30 | apiGroup: rbac.authorization.k8s.io 31 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: cinder-csi-pdb 5 | namespace: {{ provisioner_namespace }} 6 | spec: 7 | {% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} 8 | minAvailable: 1 9 | {% else %} 10 | minAvailable: 0 11 | {% endif %} 12 | selector: 13 | matchLabels: 14 | app: csi-cinder-controllerplugin 15 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/cinder/templates/storageclass.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: csi-cinder-sc 5 | provisioner: cinder.csi.openstack.org 6 | allowVolumeExpansion: true 7 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-ceph-block/defaults/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/cluster-storage/external-ceph-block/defaults/main.yml -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-ceph-block/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: RBD Provisioner | Create storage-plugin directory 3 | file: 4 | name: "{{ kube_config_dir }}/plugins/storage-plugin/rbd-provisioner" 5 | state: directory 6 | when: inventory_hostname == groups['kube-master'][0] 7 | 8 | - name: RBD Provisioner | Templates list 9 | set_fact: 10 | rbd_provisioner_templates: 11 | - { file: 'ceph-rbd-provisioner.yaml' } 12 | 13 | - name: RBD Provisioner | Create manifests 14 | template: 15 | src: "{{ item.file }}.j2" 16 | dest: '{{ kube_config_dir }}/plugins/storage-plugin/rbd-provisioner/{{ item.file }}' 17 | with_items: "{{ rbd_provisioner_templates }}" 18 | when: inventory_hostname == groups['kube-master'][0] 19 | 20 | - name: RBD Provisioner | Delete manifests 21 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rbd-provisioner/{{ item.file }}" 22 | with_items: "{{ rbd_provisioner_templates }}" 23 | when: inventory_hostname == groups['kube-master'][0] 24 | when: architectures == 'amd64' --------------------------------------------------------------------------------
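Each cluster-storage plugin in this repository follows the same switch pattern: the role's tasks/main.yml includes either enable.yml or disable.yml depending on an enable_*_provisioner variable, and the Jinja2 templates are rendered from inventory variables. As a rough sketch only (the values below are illustrative assumptions, not defaults shipped with the repository), enabling the external Ceph RBD provisioner might be driven by group_vars entries such as:

# group_vars/all.yml (hypothetical excerpt; every value here is an example, not a repo default)
enable_external_ceph_block_provisioner: 'enable'      # switch consumed by roles/plugins/cluster-storage/external-ceph-block/tasks/main.yml
storageClassName: rbd-sc                              # StorageClass name rendered by storageclass.yaml.j2
storage_rbd_provisioner_name: ceph.com/rbd            # provisioner field of the StorageClass
ceph_monitor: 192.168.10.11:6789,192.168.10.12:6789   # Ceph monitor endpoints
ceph_osd_pool: kube                                   # RBD pool backing the PersistentVolumes
ceph_admin_id: admin                                  # admin/user credentials referenced by the StorageClass
ceph_admin_secret_name: ceph-admin-secret
ceph_admin_secret_namespace: kube-system
ceph_user_id: kube
ceph_user_secret_name: ceph-user-secret
ceph_fsType: ext4
ceph_imageFormat: "2"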
/roles/plugins/cluster-storage/external-ceph-block/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Ceph | Install ceph-common 3 | package: 4 | name: ceph-common 5 | state: present 6 | delegate_to: "{{ item }}" 7 | with_items: "{{ groups['all'] }}" 8 | tags: add_worker 9 | 10 | - name: RBD Provisioner | Create storage-plugin directory 11 | file: 12 | name: "{{ kube_config_dir }}/plugins/storage-plugin/rbd-provisioner" 13 | state: directory 14 | when: inventory_hostname == groups['kube-master'][0] 15 | 16 | - name: RBD Provisioner | Templates list 17 | set_fact: 18 | rbd_provisioner_templates: 19 | - { file: 'ceph-rbd-provisioner.yaml' } 20 | 21 | - name: RBD Provisioner | Create manifests 22 | template: 23 | src: "{{ item.file }}.j2" 24 | dest: '{{ kube_config_dir }}/plugins/storage-plugin/rbd-provisioner/{{ item.file }}' 25 | with_items: "{{ rbd_provisioner_templates }}" 26 | when: inventory_hostname == groups['kube-master'][0] 27 | 28 | - name: RBD Provisioner | Deploy manifests 29 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rbd-provisioner/{{ item.file }}" 30 | with_items: "{{ rbd_provisioner_templates }}" 31 | when: inventory_hostname == groups['kube-master'][0] 32 | when: architectures == 'amd64' -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-ceph-block/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_external_ceph_block_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_external_ceph_block_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-ceph-block/templates/storageclass.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: {{ storageClassName }} 5 | provisioner: {{ storage_rbd_provisioner_name }} 6 | reclaimPolicy: Delete 7 | volumeBindingMode: Immediate 8 | parameters: 9 | monitors: {{ ceph_monitor }} 10 | adminId: {{ ceph_admin_id }} 11 | adminSecretName: {{ ceph_admin_secret_name }} 12 | adminSecretNamespace: {{ ceph_admin_secret_namespace }} 13 | pool: {{ ceph_osd_pool }} 14 | userId: {{ ceph_user_id }} 15 | userSecretName: {{ ceph_user_secret_name }} 16 | fsType: {{ ceph_fsType }} 17 | imageFormat: "{{ ceph_imageFormat }}" 18 | {% if ceph_imageFormat and ceph_imageFormat == '2' %} 19 | imageFeatures: "layering" 20 | {% endif %} -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-cephfs/defaults/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/cluster-storage/external-cephfs/defaults/main.yml -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-cephfs/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: FS Provisioner | Create storage-plugin directory 3 | file: 4 | name: "{{ kube_config_dir }}/plugins/storage-plugin/fs-provisioner" 5 | state: directory 6 | 7 | - name: FS Provisioner | Templates list 8 
| set_fact: 9 | fs_provisioner_templates: 10 | - { file: 'ceph-fs-provisioner.yaml' } 11 | 12 | - name: FS Provisioner | Create manifests 13 | template: 14 | src: "{{ item.file }}.j2" 15 | dest: '{{ kube_config_dir }}/plugins/storage-plugin/fs-provisioner/{{ item.file }}' 16 | with_items: "{{ fs_provisioner_templates }}" 17 | 18 | - name: FS Provisioner | Delete manifests 19 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/fs-provisioner/{{ item.file }}" 20 | with_items: "{{ fs_provisioner_templates }}" 21 | when: architectures == 'amd64' 22 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-cephfs/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Ceph | Install ceph-common 3 | package: 4 | name: ceph-common 5 | state: present 6 | delegate_to: "{{ item }}" 7 | with_items: "{{ groups['all'] }}" 8 | tags: add_worker 9 | 10 | - name: FS Provisioner | Create storage-plugin directory 11 | file: 12 | name: "{{ kube_config_dir }}/plugins/storage-plugin/fs-provisioner" 13 | state: directory 14 | 15 | - name: FS Provisioner | Templates list 16 | set_fact: 17 | fs_provisioner_templates: 18 | - { file: 'ceph-fs-provisioner.yaml' } 19 | 20 | - name: FS Provisioner | Create manifests 21 | template: 22 | src: "{{ item.file }}.j2" 23 | dest: '{{ kube_config_dir }}/plugins/storage-plugin/fs-provisioner/{{ item.file }}' 24 | with_items: "{{ fs_provisioner_templates }}" 25 | 26 | - name: FS Provisioner | Deploy manifests 27 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/fs-provisioner/{{ item.file }}" 28 | with_items: "{{ fs_provisioner_templates }}" 29 | when: architectures == 'amd64' 30 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-cephfs/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_external_cephfs_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_external_cephfs_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-cephfs/templates/ceph-fs-provisioner.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ storage_fs_provisioner_name }} 6 | namespace: {{ provisioner_namespace }} 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: {{ storage_fs_provisioner_name }} 12 | namespace: {{ provisioner_namespace }} 13 | rules: 14 | - apiGroups: [""] 15 | resources: ["persistentvolumes"] 16 | verbs: ["get", "list", "watch", "create", "delete"] 17 | - apiGroups: [""] 18 | resources: ["persistentvolumeclaims"] 19 | verbs: ["get", "list", "watch", "update"] 20 | - apiGroups: ["storage.k8s.io"] 21 | resources: ["storageclasses"] 22 | verbs: ["get", "list", "watch"] 23 | - apiGroups: [""] 24 | resources: ["events"] 25 | verbs: ["create", "update", "patch"] 26 | - apiGroups: [""] 27 | resources: ["services"] 28 | resourceNames: ["kube-dns","coredns"] 29 | verbs: ["list", "get"] 30 | --- 31 | kind: ClusterRoleBinding 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | metadata: 34 | name: {{ storage_fs_provisioner_name }} 35 | subjects: 36 | -
kind: ServiceAccount 37 | name: {{ storage_fs_provisioner_name }} 38 | namespace: {{ provisioner_namespace }} 39 | roleRef: 40 | kind: ClusterRole 41 | name: {{ storage_fs_provisioner_name }} 42 | apiGroup: rbac.authorization.k8s.io 43 | --- 44 | apiVersion: rbac.authorization.k8s.io/v1 45 | kind: Role 46 | metadata: 47 | name: {{ storage_fs_provisioner_name }} 48 | namespace: {{ provisioner_namespace }} 49 | rules: 50 | - apiGroups: [""] 51 | resources: ["secrets"] 52 | verbs: ["create", "get", "delete"] 53 | - apiGroups: [""] 54 | resources: ["endpoints"] 55 | verbs: ["get", "list", "watch", "create", "update", "patch"] 56 | --- 57 | apiVersion: rbac.authorization.k8s.io/v1 58 | kind: RoleBinding 59 | metadata: 60 | name: {{ storage_fs_provisioner_name }} 61 | namespace: {{ provisioner_namespace }} 62 | roleRef: 63 | apiGroup: rbac.authorization.k8s.io 64 | kind: Role 65 | name: {{ storage_fs_provisioner_name }} 66 | subjects: 67 | - kind: ServiceAccount 68 | name: {{ storage_fs_provisioner_name }} 69 | namespace: {{ provisioner_namespace }} 70 | --- 71 | apiVersion: apps/v1 72 | kind: Deployment 73 | metadata: 74 | name: {{ storage_fs_provisioner_name }} 75 | namespace: {{ provisioner_namespace }} 76 | labels: 77 | app: fs-provisioner 78 | version: {{ fs_provisioner_version}} 79 | spec: 80 | replicas: 1 81 | selector: 82 | matchLabels: 83 | app: {{ storage_fs_provisioner_name }} 84 | version: {{ fs_provisioner_version}} 85 | strategy: 86 | type: Recreate 87 | template: 88 | metadata: 89 | labels: 90 | app: {{ storage_fs_provisioner_name }} 91 | version: {{ fs_provisioner_version}} 92 | spec: 93 | containers: 94 | - name: cephfs-provisioner 95 | image: {{ fs_provisioner_image }} 96 | env: 97 | - name: PROVISIONER_NAME 98 | value: {{ storage_fs_provisioner_name }} 99 | - name: PROVISIONER_SECRET_NAMESPACE 100 | value: {{ provisioner_namespace }} 101 | command: 102 | - "/usr/local/bin/cephfs-provisioner" 103 | args: 104 | - "-id=cephfs-provisioner-1" 105 | serviceAccount: {{ storage_fs_provisioner_name }} 106 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/external-cephfs/templates/storageclass.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: {{ storageClassName }} 5 | provisioner: {{ storage_fs_provisioner_name }} 6 | parameters: 7 | monitors: {{ ceph_monitor }} 8 | adminId: {{ ceph_admin_id }} 9 | adminSecretNamespace: {{ ceph_admin_secret_namespace }} 10 | adminSecretName: {{ ceph_admin_secret_name}} -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/glusterfs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: CentOS | Install glusterfs mount utils 3 | yum: 4 | name: 5 | - glusterfs 6 | - glusterfs-fuse 7 | state: "present" 8 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 9 | tags: add_worker 10 | 11 | - name: Debian | Install glusterfs mount utils 12 | apt: 13 | name: glusterfs-client 14 | state: "present" 15 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 16 | tags: add_worker 17 | when: enable_gfs_provisioner == 'enable' 18 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/glusterfs/templates/storageclass.yaml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: heketi-secret 6 | namespace: kube-system 7 | stringData: 8 | key: {{ heketi_password }} 9 | type: kubernetes.io/glusterfs 10 | --- 11 | apiVersion: storage.k8s.io/v1 12 | kind: StorageClass 13 | metadata: 14 | name: gfs 15 | parameters: 16 | resturl: http://192.168.10.233:18080 17 | clusterid: "8a4ff57af81910e8324368a23afe3bdc" 18 | restauthenabled: "true" 19 | restuser: admin 20 | secretName: heketi-secret 21 | secretNamespace: kube-system 22 | gidMax: "50000" 23 | gidMin: "40000" 24 | volumetype: replicate:3 25 | provisioner: kubernetes.io/glusterfs -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/nfs/defaults/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/cluster-storage/nfs/defaults/main.yml -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/nfs/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: NFS| Prepare nfs-client directory 3 | file: name={{ kube_config_dir }}/plugins/storage-plugin/nfs state=directory 4 | 5 | - name: NFS| Prepare nfs-client file 6 | template: 7 | src: nfs-client-provisioner.yaml.j2 8 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/nfs/nfs-client-provisioner.yaml" 9 | 10 | - name: NFS| Delete nfs-client 11 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/nfs/nfs-client-provisioner.yaml" 12 | ignore_errors: true 13 | delegate_to: "{{ groups['kube-master'][0] }}" 14 | run_once: true 15 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/nfs/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - name: CentOS | Install nfs-utils 2 | yum: 3 | name: nfs-utils 4 | state: present 5 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 6 | tags: add_worker 7 | 8 | - name: Debian | Install nfs-common 9 | apt: 10 | name: nfs-common 11 | state: present 12 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 13 | tags: add_worker 14 | 15 | - block: 16 | - name: NFS| Set defaultvers 17 | replace: 18 | path: /etc/nfsmount.conf 19 | regexp: '# Defaultvers=4' 20 | replace: "Defaultvers=3" 21 | 22 | - name: NFS| Set nfsvers 23 | replace: 24 | path: /etc/nfsmount.conf 25 | regexp: '# Nfsvers=4' 26 | replace: "Nfsvers=3" 27 | 28 | - name: NFS| Set nfslock 29 | replace: 30 | path: /etc/nfsmount.conf 31 | regexp: '# Lock=True' 32 | replace: "Lock=False" 33 | when: storage_nfs_server_version is defined and storage_nfs_server_version == "v3" 34 | ignore_errors: true 35 | tags: add_worker 36 | 37 | - block: 38 | - name: NFS| Prepare nfs-client directory 39 | file: name={{ kube_config_dir }}/plugins/storage-plugin/nfs state=directory 40 | 41 | - name: NFS| Prepare nfs-client file 42 | template: 43 | src: nfs-client-provisioner.yaml.j2 44 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/nfs/nfs-client-provisioner.yaml" 45 | 46 | - name: NFS| Deploy nfs-client 47 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/nfs/nfs-client-provisioner.yaml" 48 |
ignore_errors: true 49 | delegate_to: "{{ groups['kube-master'][0] }}" 50 | run_once: true 51 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/nfs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_nfs_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_nfs_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/nfs/templates/nfs-client-provisioner.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ storage_nfs_provisioner_name }} 5 | namespace: {{ provisioner_namespace }} 6 | 7 | --- 8 | kind: ClusterRoleBinding 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: {{ storage_nfs_provisioner_name }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ storage_nfs_provisioner_name }} 15 | namespace: {{ provisioner_namespace }} 16 | roleRef: 17 | kind: ClusterRole 18 | name: cluster-admin 19 | apiGroup: rbac.authorization.k8s.io 20 | 21 | --- 22 | kind: Deployment 23 | apiVersion: apps/v1 24 | metadata: 25 | name: {{ storage_nfs_provisioner_name }} 26 | namespace: {{ provisioner_namespace }} 27 | labels: 28 | type: nfs 29 | nfsVersion: {{ storage_nfs_server_version }} 30 | spec: 31 | replicas: 1 32 | strategy: 33 | type: Recreate 34 | selector: 35 | matchLabels: 36 | app: {{ storage_nfs_provisioner_name }} 37 | template: 38 | metadata: 39 | labels: 40 | app: {{ storage_nfs_provisioner_name }} 41 | spec: 42 | serviceAccountName: {{ storage_nfs_provisioner_name }} 43 | containers: 44 | - name: nfs-client-provisioner 45 | image: {{ nfs_client_provisioner_image }} 46 | imagePullPolicy: IfNotPresent 47 | volumeMounts: 48 | - name: nfs-client-root 49 | mountPath: /persistentvolumes 50 | env: 51 | - name: PROVISIONER_NAME 52 | value: {{ storage_nfs_provisioner_name }} 53 | - name: NFS_SERVER 54 | value: {{ storage_nfs_server }} 55 | - name: NFS_PATH 56 | value: {{ storage_nfs_server_path}} 57 | volumes: 58 | - name: nfs-client-root 59 | nfs: 60 | server: {{ storage_nfs_server }} 61 | path: {{ storage_nfs_server_path }} 62 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/defaults/main.yml: -------------------------------------------------------------------------------- 1 | HUAWEI_OCEANSTOR_URLS: "{% for url in oceanstor_urls.split(',') %}\"https://{{ url }}:8088/deviceManager/rest\"{% if not loop.last %}, {% endif %}{% endfor %}" 2 | HUAWEI_OCEANSTOR_POOLS: "{% for pool in oceanstor_pools.split(',') %}\"{{ pool }}\"{% if not loop.last %}, {% endif %}{% endfor %}" 3 | HUAWEI_OCEANSTOR_PORTAL: "{% for portal in oceanstor_portal.split(',') %}\"{{ portal }}\"{% if not loop.last %}, {% endif %}{% endfor %}" 4 | HUAWEI_OCEANSTOR_PASSWORD: "{{ oceanstor_encryption_password.stdout.split(':')[1] | trim }}" 5 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: OceanStor | Prepare oceanstor directory 3 | file: 4 | name: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor" 5 | state: directory 6 | 7 | - name: OceanStor | Prepare csi-configmap 8 | 
template: 9 | src: huawei-csi-configmap.yaml.j2 10 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-configmap.yaml" 11 | 12 | - block: 13 | - name: OceanStor | Prepare single csi-rbac 14 | template: 15 | src: huawei-csi-rbac.yaml.j2 16 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-rbac.yaml" 17 | 18 | - name: OceanStor | Prepare single csi-controller 19 | template: 20 | src: huawei-csi-controller.yaml.j2 21 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-controller.yaml" 22 | when: oceanstor_controller_type == "single" 23 | 24 | - block: 25 | - name: OceanStor | Prepare multi csi-rbac 26 | template: 27 | src: huawei-csi-rbac-for-multi-controller.yaml.j2 28 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-rbac-for-multi-controller.yaml" 29 | 30 | - name: OceanStor | Prepare multi csi-controller 31 | template: 32 | src: huawei-csi-multi-controller.yaml.j2 33 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-multi-controller.yaml" 34 | when: oceanstor_controller_type == "multi" 35 | 36 | - name: OceanStor | Prepare csi-node 37 | template: 38 | src: huawei-csi-node.yaml.j2 39 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-node.yaml" 40 | 41 | - name: OceanStor | Delete csi-configmap 42 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-configmap.yaml" 43 | 44 | - block: 45 | - name: OceanStor | Delete single csi-rbac 46 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-rbac.yaml" 47 | 48 | - name: OceanStor | Delete single csi-controller 49 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-controller.yaml" 50 | when: oceanstor_controller_type == "single" 51 | 52 | - block: 53 | - name: OceanStor | Delete multi csi-rbac 54 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-rbac-for-multi-controller.yaml" 55 | 56 | - name: OceanStor | Delete multi csi-controller 57 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-multi-controller.yaml" 58 | when: oceanstor_controller_type == "multi" 59 | 60 | - name: OceanStor | Delete csi-node 61 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/oceanstor/huawei-csi-node.yaml" 62 | delegate_to: "{{ groups['kube-master'][0] }}" 63 | when: architectures == 'amd64' 64 | run_once: true 65 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_oceanstor_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_oceanstor_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/templates/huawei-csi-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: huawei-csi-configmap 5 | namespace: {{ provisioner_namespace }} 6 | data: 7 | csi.json: | 8 | { 9 | "backends": [ 10 | { 11 | "storage": "oceanstor-san", 12 | "product": "{{ oceanstor_product }}", 13 | "name":
"oceanstor", 14 | "urls": [ 15 | {{ HUAWEI_OCEANSTOR_URLS }} 16 | ], 17 | "user": "{{ oceanstor_user }}", 18 | "password": "{{ HUAWEI_OCEANSTOR_PASSWORD }}", 19 | "pools": [{{ HUAWEI_OCEANSTOR_POOLS }}], 20 | {% if oceanstor_type == "iscsi" %} 21 | "parameters": {"protocol": "iscsi", "portals": [{{ HUAWEI_OCEANSTOR_PORTAL }}]} 22 | {% elif oceanstor_type == "fc" %} 23 | "parameters": {"protocol": "fc"} 24 | {% else %} 25 | "parameters": {"portal": {{ HUAWEI_OCEANSTOR_PORTAL }}} 26 | {% endif %} 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/templates/huawei-csi-controller.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: apps/v1 4 | metadata: 5 | name: huawei-csi-controller 6 | namespace: {{ provisioner_namespace }} 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: huawei-csi-controller 12 | template: 13 | metadata: 14 | labels: 15 | app: huawei-csi-controller 16 | spec: 17 | serviceAccount: huawei-csi-controller 18 | hostNetwork: true 19 | containers: 20 | - name: csi-provisioner 21 | image: {{ huawei_csi_provisioner_image }} 22 | args: 23 | - "--csi-address=$(ADDRESS)" 24 | - "--timeout=6h" 25 | env: 26 | - name: ADDRESS 27 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 28 | imagePullPolicy: "IfNotPresent" 29 | volumeMounts: 30 | - name: socket-dir 31 | mountPath: /var/lib/csi/sockets/pluginproxy/ 32 | 33 | - name: csi-attacher 34 | image: {{ huawei_csi_attacher_image }} 35 | args: 36 | - "--csi-address=$(ADDRESS)" 37 | env: 38 | - name: ADDRESS 39 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 40 | imagePullPolicy: "IfNotPresent" 41 | volumeMounts: 42 | - name: socket-dir 43 | mountPath: /var/lib/csi/sockets/pluginproxy/ 44 | 45 | - name: huawei-csi-driver 46 | image: {{ huawei_csi_driver_image }} 47 | args: 48 | - "--endpoint=$(CSI_ENDPOINT)" 49 | - "--controller" 50 | - "--containerized" 51 | - "--driver-name=csi.huawei.com" 52 | env: 53 | - name: CSI_ENDPOINT 54 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 55 | imagePullPolicy: "IfNotPresent" 56 | volumeMounts: 57 | - name: socket-dir 58 | mountPath: /var/lib/csi/sockets/pluginproxy/ 59 | - name: log 60 | mountPath: /var/log 61 | - name: config-map 62 | mountPath: /etc/huawei 63 | 64 | volumes: 65 | - name: socket-dir 66 | emptyDir: 67 | - name: log 68 | hostPath: 69 | path: /var/log/ 70 | type: Directory 71 | - name: config-map 72 | configMap: 73 | name: huawei-csi-configmap -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/templates/huawei-csi-multi-controller.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: apps/v1 4 | metadata: 5 | name: huawei-csi-controller 6 | namespace: {{ provisioner_namespace }} 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: huawei-csi-controller 12 | template: 13 | metadata: 14 | labels: 15 | app: huawei-csi-controller 16 | spec: 17 | serviceAccount: huawei-csi-controller 18 | hostNetwork: true 19 | containers: 20 | - name: csi-provisioner 21 | image: {{ huawei_csi_provisioner_image }} 22 | args: 23 | - "--csi-address=$(ADDRESS)" 24 | - "--timeout=6h" 25 | - "--enable-leader-election" 26 | env: 27 | - name: ADDRESS 28 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 29 | imagePullPolicy: "IfNotPresent" 30 | 
volumeMounts: 31 | - name: socket-dir 32 | mountPath: /var/lib/csi/sockets/pluginproxy/ 33 | 34 | - name: csi-attacher 35 | image: {{ huawei_csi_attacher_image }} 36 | args: 37 | - "--csi-address=$(ADDRESS)" 38 | - "--leader-election" 39 | - "--leader-election-type=leases" 40 | env: 41 | - name: ADDRESS 42 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 43 | imagePullPolicy: "IfNotPresent" 44 | volumeMounts: 45 | - name: socket-dir 46 | mountPath: /var/lib/csi/sockets/pluginproxy/ 47 | 48 | - name: huawei-csi-driver 49 | image: {{ huawei_csi_driver_image }} 50 | args: 51 | - "--endpoint=$(CSI_ENDPOINT)" 52 | - "--controller" 53 | - "--containerized" 54 | env: 55 | - name: CSI_ENDPOINT 56 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 57 | imagePullPolicy: "IfNotPresent" 58 | volumeMounts: 59 | - name: socket-dir 60 | mountPath: /var/lib/csi/sockets/pluginproxy/ 61 | - name: log 62 | mountPath: /var/log 63 | - name: config-map 64 | mountPath: /etc/huawei 65 | 66 | volumes: 67 | - name: socket-dir 68 | emptyDir: 69 | - name: log 70 | hostPath: 71 | path: /var/log/ 72 | type: Directory 73 | - name: config-map 74 | configMap: 75 | name: huawei-csi-configmap -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/templates/huawei-csi-node.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: DaemonSet 3 | apiVersion: apps/v1 4 | metadata: 5 | name: huawei-csi-node 6 | namespace: {{ provisioner_namespace }} 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: huawei-csi-node 11 | template: 12 | metadata: 13 | labels: 14 | app: huawei-csi-node 15 | spec: 16 | serviceAccountName: huawei-csi-node 17 | hostNetwork: true 18 | containers: 19 | - name: csi-node-driver-registrar 20 | image: {{ huawei_csi_node_driver_registrar_image }} 21 | args: 22 | - "--csi-address=/csi/csi.sock" 23 | - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi.huawei.com/csi.sock" 24 | imagePullPolicy: "IfNotPresent" 25 | volumeMounts: 26 | - name: plugin-dir 27 | mountPath: /csi 28 | - name: registration-dir 29 | mountPath: /registration 30 | 31 | - name: huawei-csi-driver 32 | image: {{ huawei_csi_driver_image }} 33 | args: 34 | - "--endpoint=/csi/csi.sock" 35 | - "--containerized" 36 | - "--driver-name=csi.huawei.com" 37 | securityContext: 38 | privileged: true 39 | capabilities: 40 | add: ["SYS_ADMIN"] 41 | allowPrivilegeEscalation: true 42 | imagePullPolicy: "IfNotPresent" 43 | lifecycle: 44 | preStop: 45 | exec: 46 | command: ["/bin/sh", "-c", "rm -f /csi/csi.sock"] 47 | volumeMounts: 48 | - name: plugin-dir 49 | mountPath: /csi 50 | - name: pods-dir 51 | mountPath: /var/lib/kubelet 52 | mountPropagation: "Bidirectional" 53 | - name: etc-dir 54 | mountPath: /etc 55 | - name: log-dir 56 | mountPath: /var/log 57 | - name: dev-dir 58 | mountPath: /dev 59 | mountPropagation: "HostToContainer" 60 | - name: iscsi-dir 61 | mountPath: /var/lib/iscsi 62 | - name: config-map 63 | mountPath: /etc/huawei 64 | volumes: 65 | - name: plugin-dir 66 | hostPath: 67 | path: /var/lib/kubelet/plugins/csi.huawei.com 68 | type: DirectoryOrCreate 69 | - name: registration-dir 70 | hostPath: 71 | path: /var/lib/kubelet/plugins_registry 72 | type: Directory 73 | - name: pods-dir 74 | hostPath: 75 | path: /var/lib/kubelet 76 | type: Directory 77 | - name: etc-dir 78 | hostPath: 79 | path: /etc 80 | type: Directory 81 | - name: dev-dir 82 | hostPath: 83 | path: /dev 84 | type: Directory 85 | - name: iscsi-dir 86 | 
hostPath: 87 | path: /var/lib/iscsi 88 | - name: log-dir 89 | hostPath: 90 | path: /var/log/ 91 | type: Directory 92 | - name: config-map 93 | configMap: 94 | name: huawei-csi-configmap -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/oceanstor/templates/huawei-csi-rbac-for-multi-controller.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: huawei-csi-controller 6 | namespace: {{ provisioner_namespace }} 7 | 8 | --- 9 | kind: ClusterRoleBinding 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | metadata: 12 | name: huawei-csi-provisioner-role 13 | subjects: 14 | - kind: ServiceAccount 15 | name: huawei-csi-controller 16 | namespace: {{ provisioner_namespace }} 17 | roleRef: 18 | kind: ClusterRole 19 | name: cluster-admin 20 | apiGroup: rbac.authorization.k8s.io 21 | 22 | --- 23 | kind: ClusterRoleBinding 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | metadata: 26 | name: huawei-csi-attacher-role 27 | subjects: 28 | - kind: ServiceAccount 29 | name: huawei-csi-controller 30 | namespace: {{ provisioner_namespace }} 31 | roleRef: 32 | kind: ClusterRole 33 | name: cluster-admin 34 | apiGroup: rbac.authorization.k8s.io 35 | 36 | --- 37 | apiVersion: v1 38 | kind: ServiceAccount 39 | metadata: 40 | name: huawei-csi-node 41 | namespace: {{ provisioner_namespace }} 42 | 43 | --- 44 | kind: ClusterRoleBinding 45 | apiVersion: rbac.authorization.k8s.io/v1 46 | metadata: 47 | name: huawei-csi-driver-registrar-role 48 | subjects: 49 | - kind: ServiceAccount 50 | name: huawei-csi-node 51 | namespace: {{ provisioner_namespace }} 52 | roleRef: 53 | kind: ClusterRole 54 | name: cluster-admin 55 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/rook-ceph/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | # rook-ceph 2 | mon_count: "{% if groups['all'] | length < 3 %}2{% else %}3{% endif %}" 3 | 4 | # set default StorageClass 5 | default_label: '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' 6 | 7 | storageClassName: "ceph-rook-storageclass" -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/rook-ceph/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Prepare rook-ceph deploy directory 3 | file: name={{ kube_config_dir }}/plugins/storage-plugin/rook-ceph state=directory 4 | 5 | - block: 6 | - name: Prepare replicapool yaml 7 | template: 8 | src: replicapool.yaml.j2 9 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/replicapool.yaml" 10 | 11 | - name: Delete replicapool yaml 12 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/replicapool.yaml" 13 | ignore_errors: true 14 | 15 | - name: Prepare filesystem yaml 16 | template: 17 | src: filesystem.yaml.j2 18 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/filesystem.yaml" 19 | 20 | - name: Delete filesystem yaml 21 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/filesystem.yaml" 22 | ignore_errors: true 23 | 24 | - block: 25 | - name: Prepare cluster yaml 26 | template: 27 | src: cluster.yaml.j2 28 | dest: "{{ kube_config_dir 
}}/plugins/storage-plugin/rook-ceph/cluster.yaml" 29 | 30 | - name: Delete cluster yaml 31 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/cluster.yaml" 32 | 33 | - block: 34 | - name: Prepare operator file 35 | template: 36 | src: operator.yaml.j2 37 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/operator.yaml" 38 | 39 | - name: Delete rook-operator 40 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/operator.yaml" 41 | 42 | - name: Prepare common yaml 43 | template: 44 | src: common.yaml.j2 45 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/common.yaml" 46 | 47 | - name: Delete common yaml 48 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/common.yaml" 49 | 50 | - name: Prepare crds yaml 51 | template: 52 | src: crds.yaml.j2 53 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/crds.yaml" 54 | 55 | - name: Delete crds yaml 56 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/crds.yaml" 57 | when: architectures == 'amd64' 58 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/rook-ceph/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - block: 3 | - name: Prepare rook-ceph deploy directory 4 | file: name={{ kube_config_dir }}/plugins/storage-plugin/rook-ceph state=directory 5 | 6 | - name: Prepare crds yaml 7 | template: 8 | src: crds.yaml.j2 9 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/crds.yaml" 10 | 11 | - name: Deploy crds yaml 12 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/crds.yaml" 13 | 14 | - name: Prepare common yaml 15 | template: 16 | src: common.yaml.j2 17 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/common.yaml" 18 | 19 | - name: Deploy common yaml 20 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/common.yaml" 21 | 22 | - name: Prepare operator file 23 | template: 24 | src: operator.yaml.j2 25 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/operator.yaml" 26 | 27 | - name: Deploy rook-operator 28 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/operator.yaml" 29 | 30 | - name: Waiting for the operator pod to run 31 | shell: "{{ bin_dir }}/kubectl -n rook-ceph get pod -o wide | grep rook-ceph-operator | awk '{print $3}'" 32 | register: operator_pod_status 33 | until: operator_pod_status.stdout == "Running" 34 | retries: 30 35 | delay: 10 36 | 37 | - block: 38 | - name: Prepare cluster yaml 39 | template: 40 | src: cluster.yaml.j2 41 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/cluster.yaml" 42 | 43 | - name: Deploy cluster yaml 44 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/cluster.yaml" 45 | 46 | - name: Waiting for the mgr pod to run 47 | shell: "{{ bin_dir }}/kubectl -n rook-ceph get pod -o wide | grep rook-ceph-mgr | awk '{print $3}'" 48 | register: mgr_pod_status 49 | until: mgr_pod_status.stdout == "Running" 50 | retries: 60 51 | delay: 10 52 | 53 | - block: 54 | - name: Prepare replicapool yaml 55 | template: 56 | src: replicapool.yaml.j2 57 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/replicapool.yaml" 58 | 59 | - name: 
Deploy replicapool yaml 60 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/replicapool.yaml" 61 | 62 | - name: Prepare filesystem yaml 63 | template: 64 | src: filesystem.yaml.j2 65 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/filesystem.yaml" 66 | 67 | - name: Deploy filesystem yaml 68 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/rook-ceph/filesystem.yaml" 69 | when: architectures == 'amd64' 70 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/rook-ceph/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_rook_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_rook_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/rook-ceph/templates/replicapool.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephBlockPool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph # namespace:cluster 6 | spec: 7 | failureDomain: host 8 | replicated: 9 | size: 3 10 | # Disallow setting pool with replica 1, this could lead to data loss without recovery. 11 | # Make sure you're *ABSOLUTELY CERTAIN* that is what you want 12 | requireSafeReplicaSize: true 13 | # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool 14 | # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size 15 | #targetSizeRatio: .5 -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/defaults/main.yml: -------------------------------------------------------------------------------- 1 | csi_endpoint: '{% if vc_version >= "7.0.1" %}/csi{% else %}/var/lib/csi/sockets/pluginproxy{% endif %}' -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: vSphere | Create cloud-config directory 3 | file: 4 | name: "{{ cloud_config_dir }}" 5 | state: directory 6 | 7 | - name: vSphere | Generate CSI cloud-config 8 | template: 9 | src: "{{ item }}.j2" 10 | dest: "{{ cloud_config_dir }}/{{ item }}" 11 | mode: 0640 12 | with_items: 13 | - vsphere-csi-cloud-config 14 | 15 | - name: vSphere | Prepare vSphere directory 16 | file: 17 | name: "{{ kube_config_dir }}/plugins/storage-plugin/vsphere" 18 | state: directory 19 | 20 | - name: vSphere | Generate Manifests 21 | template: 22 | src: "{{ item }}.j2" 23 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/vsphere/{{ item }}" 24 | with_items: 25 | - vsphere-csi-controller-config.yml 26 | - vsphere-csi-controller-deployment.yml 27 | - vsphere-csi-controller-rbac.yml 28 | - vsphere-csi-controller-service.yml 29 | - vsphere-csi-driver.yml 30 | - vsphere-csi-node-rbac.yml 31 | - vsphere-csi-node.yml 32 | register: vsphere_csi_manifests 33 | 34 | - name: vSphere | Generate a CSI secret manifest 35 | command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ cloud_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml" 
36 | register: vsphere_csi_secret_manifest 37 | no_log: true 38 | 39 | - name: vSphere | Delete a CSI secret manifest 40 | command: 41 | cmd: "{{ bin_dir }}/kubectl delete -f -" 42 | stdin: "{{ vsphere_csi_secret_manifest.stdout }}" 43 | no_log: true 44 | 45 | - name: vSphere | vSphere CSI Driver | Delete Manifests 46 | shell: "{{ bin_dir }}/kubectl delete -f {{ kube_config_dir }}/plugins/storage-plugin/vsphere/{{ item.item }}" 47 | with_items: 48 | - "{{ vsphere_csi_manifests.results }}" 49 | loop_control: 50 | label: "{{ item.item }}" 51 | when: architectures == 'amd64' -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: vSphere | Download govc file 3 | get_url: 4 | validate_certs: no 5 | url: "{{ govc_download_url }}" 6 | dest: "{{ base_dir }}/" 7 | timeout: "{{ download_timeout_online }}" 8 | 9 | - name: vSphere | Unarchive govc file 10 | shell: "gunzip {{ base_dir }}/govc_linux_{{ architectures }}.gz" 11 | 12 | - name: vSphere | Copy govc file 13 | copy: 14 | src: "{{ base_dir}}/govc_linux_{{ architectures }}" 15 | dest: "{{ bin_dir }}/govc" 16 | remote_src: yes 17 | mode: "0755" 18 | 19 | - name: vSphere | Remove govc file 20 | file: 21 | name: "{{ base_dir }}/govc_linux_{{ architectures }}" 22 | state: absent 23 | 24 | - name: CentOS | Install jq 25 | yum: 26 | name: jq 27 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 28 | 29 | - name: Debian | Install jq 30 | apt: 31 | name: jq 32 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 33 | 34 | - name: vSphere | Create cloud-config directory 35 | file: 36 | name: "{{ cloud_config_dir }}" 37 | state: directory 38 | 39 | - name: vSphere | Prepare setup.sh 40 | template: 41 | src: "{{ item }}.j2" 42 | dest: "{{ cloud_config_dir }}/{{ item }}" 43 | with_items: 44 | - setup.sh 45 | 46 | - name: vSphere | Set disk.enableuuid to true for all vms 47 | shell: "bash setup.sh" 48 | args: 49 | chdir: "{{ cloud_config_dir }}" 50 | no_log: true 51 | ignore_errors: true 52 | 53 | - name: vSphere | Generate CSI cloud-config 54 | template: 55 | src: "{{ item }}.j2" 56 | dest: "{{ cloud_config_dir }}/{{ item }}" 57 | mode: 0640 58 | with_items: 59 | - vsphere-csi-cloud-config 60 | 61 | - name: vSphere | Prepare vSphere directory 62 | file: 63 | name: "{{ kube_config_dir }}/plugins/storage-plugin/vsphere" 64 | state: directory 65 | 66 | - name: vSphere | Generate Manifests 67 | template: 68 | src: "{{ item }}.j2" 69 | dest: "{{ kube_config_dir }}/plugins/storage-plugin/vsphere/{{ item }}" 70 | with_items: 71 | - vsphere-csi-controller-config.yml 72 | - vsphere-csi-controller-deployment.yml 73 | - vsphere-csi-controller-rbac.yml 74 | - vsphere-csi-controller-service.yml 75 | - vsphere-csi-driver.yml 76 | - vsphere-csi-node-rbac.yml 77 | - vsphere-csi-node.yml 78 | register: vsphere_csi_manifests 79 | 80 | - name: vSphere | Generate a CSI secret manifest 81 | command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ cloud_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml" 82 | register: vsphere_csi_secret_manifest 83 | no_log: true 84 | 85 | - name: vSphere | Apply a CSI secret manifest 86 | command: 87 | cmd: "{{ bin_dir }}/kubectl apply -f -" 88 | stdin: "{{ vsphere_csi_secret_manifest.stdout }}" 89 | no_log: 
true 90 | 91 | - name: vSphere | vSphere CSI Driver | Apply Manifests 92 | shell: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/plugins/storage-plugin/vsphere/{{ item.item }}" 93 | with_items: 94 | - "{{ vsphere_csi_manifests.results }}" 95 | loop_control: 96 | label: "{{ item.item }}" 97 | when: architectures == 'amd64' -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: enable.yml 2 | when: enable_vsphere_provisioner == 'enable' 3 | 4 | - include_tasks: disable.yml 5 | when: enable_vsphere_provisioner == 'disable' 6 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/templates/setup.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export GOVC_URL="{{ vc_host }}" 4 | export GOVC_USERNAME="{{ vc_username }}" 5 | export GOVC_PASSWORD='{{ vc_password }}' 6 | export GOVC_INSECURE="1" 7 | DATACENTER="{{ datacenter }}" 8 | FOLDER="{{ folder }}" 9 | 10 | IFS=$'\n' 11 | for vm in $(govc ls "/$DATACENTER/vm/$FOLDER" | grep {{ cluster_name }}); do 12 | MACHINE_INFO=$(govc vm.info -json -dc=$DATACENTER -vm.ipath="/$vm" -e=true) 13 | VM_NAME=$(jq -r ' .VirtualMachines[] | .Name' <<< $MACHINE_INFO | awk '{print tolower($0)}') 14 | echo "govc vm.change -e=\"disk.enableUUID=1\" -vm=\"/$DATACENTER/vm/$FOLDER/$VM_NAME\"" 15 | govc vm.change -e="disk.enableUUID=1" -vm="/$DATACENTER/vm/$FOLDER/$VM_NAME" 16 | done -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/templates/vsphere-csi-cloud-config.j2: -------------------------------------------------------------------------------- 1 | [Global] 2 | cluster-id = "{{ cluster_name }}" 3 | 4 | [VirtualCenter "{{ vc_host }}"] 5 | insecure-flag = "true" 6 | user = "{{ vc_username }}" 7 | password = "{{ vc_password }}" 8 | port = "{{ vc_port }}" 9 | datacenters = "{{ datacenter }}" -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/templates/vsphere-csi-controller-config.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: internal-feature-states.csi.vsphere.vmware.com 5 | namespace: {{ provisioner_namespace }} 6 | data: 7 | "csi-migration": "false" 8 | {% if vc_version >= "7.0" %} 9 | "csi-auth-check": "true" 10 | {% else %} 11 | "csi-auth-check": "false" 12 | {% endif %} 13 | "online-volume-extend": "true" 14 | "trigger-csi-fullsync": "false" 15 | "async-query-volume": "true" 16 | "improved-csi-idempotency": "true" 17 | "improved-volume-topology": "true" 18 | "block-volume-snapshot": "false" 19 | "csi-windows-support": "false" 20 | "use-csinode-id": "true" 21 | "pv-to-backingdiskobjectid-mapping": "false" 22 | "cnsmgr-suspend-create-volume": "false" 23 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/templates/vsphere-csi-controller-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ServiceAccount 2 | apiVersion: v1 3 | metadata: 4 | name: vsphere-csi-controller 5 | namespace: {{ provisioner_namespace }} 6 | --- 7 | kind: ClusterRole 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | 
metadata: 10 | name: vsphere-csi-controller-role 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["nodes", "pods", "configmaps"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: [""] 16 | resources: ["persistentvolumeclaims"] 17 | verbs: ["get", "list", "watch", "update"] 18 | {% if vc_version >= "7.0" %} 19 | - apiGroups: [""] 20 | resources: ["persistentvolumeclaims/status"] 21 | # allow the controller to update the PVC status subresource 22 | verbs: ["update", "patch"] 23 | {% endif %} 24 | - apiGroups: [""] 25 | resources: ["persistentvolumes"] 26 | verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] 27 | - apiGroups: [""] 28 | resources: ["events"] 29 | verbs: ["get", "list", "watch", "create", "update", "patch"] 30 | - apiGroups: ["coordination.k8s.io"] 31 | resources: ["leases"] 32 | verbs: ["get", "watch", "list", "delete", "update", "create"] 33 | - apiGroups: ["storage.k8s.io"] 34 | resources: ["storageclasses","csinodes"] 35 | verbs: ["get", "list", "watch"] 36 | - apiGroups: ["storage.k8s.io"] 37 | resources: ["volumeattachments"] 38 | verbs: ["get", "list", "watch", "patch", "update"] 39 | - apiGroups: ["cns.vmware.com"] 40 | resources: ["triggercsifullsyncs"] 41 | verbs: ["create", "get", "update", "watch", "list"] 42 | - apiGroups: ["cns.vmware.com"] 43 | resources: ["cnsvspherevolumemigrations"] 44 | verbs: ["create", "get", "list", "watch", "update", "delete"] 45 | - apiGroups: ["apiextensions.k8s.io"] 46 | resources: ["customresourcedefinitions"] 47 | verbs: ["get", "create", "update"] 48 | - apiGroups: ["cns.vmware.com"] 49 | resources: ["cnsvolumeoperationrequests"] 50 | verbs: ["create", "get", "list", "update", "delete"] 51 | - apiGroups: [ "cns.vmware.com" ] 52 | resources: [ "csinodetopologies" ] 53 | verbs: ["get", "update", "watch", "list"] 54 | - apiGroups: ["storage.k8s.io"] 55 | resources: ["volumeattachments/status"] 56 | verbs: ["patch"] 57 | - apiGroups: [ "snapshot.storage.k8s.io" ] 58 | resources: [ "volumesnapshots" ] 59 | verbs: [ "get", "list" ] 60 | - apiGroups: [ "snapshot.storage.k8s.io" ] 61 | resources: [ "volumesnapshotclasses" ] 62 | verbs: [ "watch", "get", "list" ] 63 | - apiGroups: [ "snapshot.storage.k8s.io" ] 64 | resources: [ "volumesnapshotcontents" ] 65 | verbs: [ "create", "get", "list", "watch", "update", "delete", "patch" ] 66 | - apiGroups: [ "snapshot.storage.k8s.io" ] 67 | resources: [ "volumesnapshotcontents/status" ] 68 | verbs: [ "update", "patch" ] 69 | --- 70 | kind: ClusterRoleBinding 71 | apiVersion: rbac.authorization.k8s.io/v1 72 | metadata: 73 | name: vsphere-csi-controller-binding 74 | subjects: 75 | - kind: ServiceAccount 76 | name: vsphere-csi-controller 77 | namespace: {{ provisioner_namespace }} 78 | roleRef: 79 | kind: ClusterRole 80 | name: vsphere-csi-controller-role 81 | apiGroup: rbac.authorization.k8s.io 82 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/templates/vsphere-csi-controller-service.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: vsphere-csi-controller 5 | namespace: {{ provisioner_namespace }} 6 | labels: 7 | app: vsphere-csi-controller 8 | spec: 9 | ports: 10 | - name: ctlr 11 | port: 2112 12 | targetPort: 2112 13 | protocol: TCP 14 | - name: syncer 15 | port: 2113 16 | targetPort: 2113 17 | protocol: TCP 18 | selector: 19 | app: vsphere-csi-controller 20 | --------------------------------------------------------------------------------
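The vSphere enable.yml earlier in this role applies the rendered CSI manifests but, unlike the rook-ceph and metallb roles, never polls for the controller to come up. A comparable wait task might look like the sketch below; it reuses this repository's shell/until/retries pattern, but the assumption that controller pods run in {{ provisioner_namespace }} with a single replica, and the 30x10s retry budget, are illustrative choices rather than values taken from this repository.

- name: vSphere | Waiting for the vsphere-csi-controller pod to run (illustrative sketch, not part of the role)
  shell: "{{ bin_dir }}/kubectl -n {{ provisioner_namespace }} get pod -o wide | grep vsphere-csi-controller | awk '{print $3}'"
  register: csi_controller_status
  until: csi_controller_status.stdout == "Running"
  retries: 30
  delay: 10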
/roles/plugins/cluster-storage/vsphere/templates/vsphere-csi-driver.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: csi.vsphere.vmware.com 5 | spec: 6 | attachRequired: true 7 | podInfoOnMount: false 8 | -------------------------------------------------------------------------------- /roles/plugins/cluster-storage/vsphere/templates/vsphere-csi-node-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: vsphere-csi-node 6 | namespace: {{ provisioner_namespace }} 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: vsphere-csi-node-cluster-role 12 | rules: 13 | - apiGroups: ["cns.vmware.com"] 14 | resources: ["csinodetopologies"] 15 | verbs: ["create", "watch", "get", "patch" ] 16 | - apiGroups: [""] 17 | resources: ["nodes"] 18 | verbs: ["get"] 19 | --- 20 | kind: ClusterRoleBinding 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | metadata: 23 | name: vsphere-csi-node-cluster-role-binding 24 | subjects: 25 | - kind: ServiceAccount 26 | name: vsphere-csi-node 27 | namespace: {{ provisioner_namespace }} 28 | roleRef: 29 | kind: ClusterRole 30 | name: vsphere-csi-node-cluster-role 31 | apiGroup: rbac.authorization.k8s.io 32 | --- 33 | kind: Role 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | metadata: 36 | name: vsphere-csi-node-role 37 | namespace: {{ provisioner_namespace }} 38 | rules: 39 | - apiGroups: [""] 40 | resources: ["configmaps"] 41 | verbs: ["get", "list", "watch"] 42 | --- 43 | kind: RoleBinding 44 | apiVersion: rbac.authorization.k8s.io/v1 45 | metadata: 46 | name: vsphere-csi-node-binding 47 | namespace: {{ provisioner_namespace }} 48 | subjects: 49 | - kind: ServiceAccount 50 | name: vsphere-csi-node 51 | namespace: {{ provisioner_namespace }} 52 | roleRef: 53 | kind: Role 54 | name: vsphere-csi-node-role 55 | apiGroup: rbac.authorization.k8s.io 56 | -------------------------------------------------------------------------------- /roles/plugins/dns-cache/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - name: Copy dns-cache file 2 | template: 3 | src: iptables.yaml.j2 4 | dest: /etc/kubernetes/plugins/dns-cache/nodelocaldns.yaml 5 | when: kube_proxy_mode == "iptables" 6 | ignore_errors: true 7 | 8 | - name: Copy dns-cache file 9 | template: 10 | src: ipvs.yaml.j2 11 | dest: /etc/kubernetes/plugins/dns-cache/nodelocaldns.yaml 12 | when: kube_proxy_mode == "ipvs" 13 | ignore_errors: true 14 | 15 | - name: Delete dns-cache ds 16 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/dns-cache/nodelocaldns.yaml" 17 | ignore_errors: true 18 | 19 | - block: 20 | - name: Modify the kubelet config file 21 | lineinfile: 22 | path: "/var/lib/kubelet/config.yaml" 23 | regexp: '- {{ pillar_local_dns }}' 24 | line: "- {{ kube_dns_clusterip.stdout }}" 25 | delegate_to: "{{ item }}" 26 | with_items: "{{ groups['all'] }}" 27 | ignore_errors: true 28 | 29 | - name: Restart kubelet 30 | service: 31 | name: kubelet 32 | state: restarted 33 | delegate_to: "{{ item }}" 34 | with_items: "{{ groups['all'] }}" 35 | ignore_errors: true 36 | when: kube_proxy_mode == "ipvs" -------------------------------------------------------------------------------- /roles/plugins/dns-cache/tasks/enable.yml: 
-------------------------------------------------------------------------------- 1 | - name: Copy dns-cache file 2 | template: 3 | src: iptables.yaml.j2 4 | dest: /etc/kubernetes/plugins/dns-cache/nodelocaldns.yaml 5 | when: kube_proxy_mode == "iptables" 6 | 7 | - name: Copy dns-cache file 8 | template: 9 | src: ipvs.yaml.j2 10 | dest: /etc/kubernetes/plugins/dns-cache/nodelocaldns.yaml 11 | when: kube_proxy_mode == "ipvs" 12 | 13 | - name: Ensure that apiserver accesses the etcd cluster properly 14 | shell: "{{ bin_dir }}/kubectl get ns" 15 | register: etcd_status 16 | until: '"kube-system" in etcd_status.stdout' 17 | retries: 10 18 | delay: 6 19 | 20 | - name: Deploy dns-cache ds 21 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/dns-cache/nodelocaldns.yaml" 22 | 23 | - block: 24 | - name: Modify the kubelet config file 25 | lineinfile: 26 | path: "/var/lib/kubelet/config.yaml" 27 | regexp: '- {{ kube_dns_clusterip.stdout }}' 28 | line: "- {{ pillar_local_dns }}" 29 | delegate_to: "{{ item }}" 30 | with_items: "{{ groups['all'] }}" 31 | 32 | - name: Restart kubelet 33 | service: 34 | name: kubelet 35 | state: restarted 36 | delegate_to: "{{ item }}" 37 | with_items: "{{ groups['all'] }}" 38 | when: kube_proxy_mode == "ipvs" -------------------------------------------------------------------------------- /roles/plugins/dns-cache/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create dns-cache directory 2 | file: 3 | path: /etc/kubernetes/plugins/dns-cache 4 | state: directory 5 | 6 | - name: Get ClusterIP of kube-dns 7 | shell: "{{ bin_dir }}/kubectl get svc -n kube-system | grep kube-dns | grep -v upstream | awk '{ print $3 }'" 8 | register: kube_dns_clusterip 9 | 10 | - block: 11 | - include_tasks: enable.yml 12 | when: component_created_by == 'cluster' 13 | 14 | - include_tasks: "{{ item }}" 15 | with_items: 16 | - disable.yml 17 | - enable.yml 18 | when: component_created_by == 'component' 19 | when: enable_dns_cache == 'enable' 20 | 21 | - include_tasks: disable.yml 22 | when: 23 | - enable_dns_cache == 'disable' 24 | - component_created_by == 'component' 25 | -------------------------------------------------------------------------------- /roles/plugins/gpu/gpu-operator/files/gpu-operator-v1.7.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/gpu/gpu-operator/files/gpu-operator-v1.7.0.tgz -------------------------------------------------------------------------------- /roles/plugins/gpu/gpu-operator/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - name: GPU| Delete NVIDIA GPU Operator 2 | command: "helm uninstall ko-gpu-operator -n kube-operator" 3 | ignore_errors: true 4 | 5 | - name: GPU| Delete Namespace gpu-operator-resources 6 | command: "{{ bin_dir }}/kubectl delete ns gpu-operator-resources" 7 | ignore_errors: true -------------------------------------------------------------------------------- /roles/plugins/gpu/gpu-operator/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - name: Confirm GPU Operator exist 2 | command: "{{ bin_dir }}/kubectl get ns -o name" 3 | register: nvidia_gpu_operator_exist 4 | 5 | - name: Copy GPU Operator chart 6 | copy: src=gpu-operator-v1.7.0.tgz dest={{ base_dir }}/ mode=0644 7 | 8 | - name: Confirm Namespace 
gpu-operator-resources exist 9 | command: "{{ bin_dir }}/kubectl create ns gpu-operator-resources" 10 | when: '"namespace/gpu-operator-resources" not in nvidia_gpu_operator_exist.stdout' 11 | 12 | - name: Confirm Namespace kube-operator exist 13 | command: "{{ bin_dir }}/kubectl create ns kube-operator" 14 | when: '"namespace/kube-operator" not in nvidia_gpu_operator_exist.stdout' 15 | 16 | - name: GPU| Create NVIDIA GPU Operator 17 | shell: "helm upgrade -i ko-gpu-operator {{ base_dir }}/gpu-operator-v1.7.0.tgz -n kube-operator \ 18 | --set toolkit.version=1.4.7-ubi8 \ 19 | --set operator.defaultRuntime=docker \ 20 | --set validator.repository=registry.kubeoperator.io:8082/kubeoperator \ 21 | --set operator.repository=registry.kubeoperator.io:8082/kubeoperator \ 22 | --set operator.initContainer.repository=registry.kubeoperator.io:8082/kubeoperator \ 23 | --set driver.repository=registry.kubeoperator.io:8082/kubeoperator \ 24 | --set toolkit.repository=registry.kubeoperator.io:8082/kubeoperator \ 25 | --set devicePlugin.repository=registry.kubeoperator.io:8082/kubeoperator \ 26 | --set dcgmExporter.repository=registry.kubeoperator.io:8082/kubeoperator \ 27 | --set gfd.repository=registry.kubeoperator.io:8082/kubeoperator \ 28 | --set migManager.repository=registry.kubeoperator.io:8082/kubeoperator \ 29 | --set node-feature-discovery.image.repository=registry.kubeoperator.io:8082/kubeoperator/node-feature-discovery" -------------------------------------------------------------------------------- /roles/plugins/gpu/gpu-operator/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - include_tasks: enable.yml 3 | when: component_created_by == 'cluster' 4 | 5 | - include_tasks: "{{ item }}" 6 | with_items: 7 | - disable.yml 8 | - enable.yml 9 | when: component_created_by == 'component' 10 | when: enable_gpu == 'enable' 11 | 12 | - include_tasks: disable.yml 13 | when: 14 | - enable_gpu == 'disable' 15 | - component_created_by == 'component' 16 | -------------------------------------------------------------------------------- /roles/plugins/ingress-controller/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | 4 | # traefik默认证书过期时间(天) 5 | traefik_certs_expired: 3650 -------------------------------------------------------------------------------- /roles/plugins/ingress-controller/tasks/disable-nginx.yml: -------------------------------------------------------------------------------- 1 | - name: Copy nginx-ingress-controller file 2 | template: 3 | src: nginx-ingress-controller.yaml.j2 4 | dest: /etc/kubernetes/plugins/ingress-controller/nginx-ingress-controller.yaml 5 | when: "nginx_ingress_version == '0.33.0'" 6 | 7 | - name: Copy nginx-ingress-controller file 8 | template: 9 | src: nginx-ingress-controller-v1.1.1.yaml.j2 10 | dest: /etc/kubernetes/plugins/ingress-controller/nginx-ingress-controller.yaml 11 | when: nginx_ingress_version in [ 'v1.1.1','v1.2.1' ] 12 | 13 | - name: Delete nginx-ingress-controller 14 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/ingress-controller/nginx-ingress-controller.yaml" 15 | ignore_errors: true -------------------------------------------------------------------------------- /roles/plugins/ingress-controller/tasks/disable-traefik.yml: -------------------------------------------------------------------------------- 1 | - name: Copy traefik-ingress-controller crds file 2 | template: 3 | src: 
traefik-ingress-controller/crds.yaml.j2 4 | dest: /etc/kubernetes/plugins/ingress-controller/crds.yaml 5 | 6 | - name: Copy traefik-ingress-controller file 7 | template: 8 | src: traefik-ingress-controller/traefik-ingress-controller.yaml.j2 9 | dest: /etc/kubernetes/plugins/ingress-controller/traefik-ingress-controller.yaml 10 | 11 | - name: Delete traefik-ingress-controller 12 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/ingress-controller/traefik-ingress-controller.yaml" 13 | ignore_errors: true 14 | 15 | - name: Delete traefik-ingress-controller crds 16 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/ingress-controller/crds.yaml" 17 | ignore_errors: true -------------------------------------------------------------------------------- /roles/plugins/ingress-controller/tasks/enable-nginx.yml: -------------------------------------------------------------------------------- 1 | - name: Copy nginx-ingress-controller file 2 | template: 3 | src: nginx-ingress-controller.yaml.j2 4 | dest: /etc/kubernetes/plugins/ingress-controller/nginx-ingress-controller.yaml 5 | when: "nginx_ingress_version == '0.33.0'" 6 | 7 | - name: Copy nginx-ingress-controller file 8 | template: 9 | src: nginx-ingress-controller-v1.1.1.yaml.j2 10 | dest: /etc/kubernetes/plugins/ingress-controller/nginx-ingress-controller.yaml 11 | when: nginx_ingress_version in [ 'v1.1.1','v1.2.1' ] 12 | 13 | - name: Ensure that apiserver accesses the etcd cluster properly 14 | shell: "{{ bin_dir }}/kubectl get ns" 15 | register: etcd_status 16 | until: '"kube-system" in etcd_status.stdout' 17 | retries: 10 18 | delay: 6 19 | 20 | - name: Deploy nginx-ingress-controller 21 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/ingress-controller/nginx-ingress-controller.yaml" -------------------------------------------------------------------------------- /roles/plugins/ingress-controller/tasks/enable-traefik.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Copy traefik-ingress-controller crds file 3 | template: 4 | src: traefik-ingress-controller/crds.yaml.j2 5 | dest: /etc/kubernetes/plugins/ingress-controller/crds.yaml 6 | 7 | - name: Copy traefik-ingress-controller file 8 | template: 9 | src: traefik-ingress-controller/traefik-ingress-controller.yaml.j2 10 | dest: /etc/kubernetes/plugins/ingress-controller/traefik-ingress-controller.yaml 11 | 12 | # - name: 创建 traefik-ingress-controller 默认证书 13 | # shell: > 14 | # openssl req -x509 -nodes -days {{ traefik_certs_expired }} \ 15 | # -newkey rsa:2048 \ 16 | # -keyout /etc/kubernetes/plugins/ingress-controller/tls.key \ 17 | # -out /etc/kubernetes/plugins/ingress-controller/tls.crt \ 18 | # -subj "/CN=kubeoperator.io" 19 | 20 | # - name: 应用 traefik-ingress-controller 默认证书 21 | # shell: > 22 | # kubectl create ns ingress-controller 23 | # --dry-run -o yaml | kubectl apply -f - && 24 | # kubectl -n ingress-controller create secret tls \ 25 | # traefik-default-cert \ 26 | # --key=/etc/kubernetes/plugins/ingress-controller/tls.key \ 27 | # --cert=/etc/kubernetes/plugins/ingress-controller/tls.crt \ 28 | # --dry-run -o yaml | kubectl apply -f - 29 | 30 | - name: Ensure that apiserver accesses the etcd cluster properly 31 | shell: "{{ bin_dir }}/kubectl get ns" 32 | register: etcd_status 33 | until: '"kube-system" in etcd_status.stdout' 34 | retries: 10 35 | delay: 6 36 | 37 | - name: Deploy traefik-ingress-controller crds 38 | shell: "{{ bin_dir }}/kubectl apply -f 
/etc/kubernetes/plugins/ingress-controller/crds.yaml" 39 | 40 | - name: Ensure that apiserver accesses the etcd cluster properly 41 | shell: "{{ bin_dir }}/kubectl get ns" 42 | register: etcd_status 43 | until: '"kube-system" in etcd_status.stdout' 44 | retries: 10 45 | delay: 6 46 | 47 | - name: Deploy traefik-ingress-controller 48 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/ingress-controller/traefik-ingress-controller.yaml" -------------------------------------------------------------------------------- /roles/plugins/ingress-controller/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create ingress directory 2 | file: 3 | path: /etc/kubernetes/plugins/ingress-controller 4 | state: directory 5 | 6 | - name: Get kubernetes version 7 | shell: "{{ bin_dir }}/kubeadm version -o short" 8 | register: kubeadm_version_output 9 | 10 | - block: 11 | - block: 12 | - include_tasks: enable-nginx.yml 13 | when: component_created_by == 'cluster' 14 | 15 | - include_tasks: "{{ item }}" 16 | with_items: 17 | - disable-nginx.yml 18 | - enable-nginx.yml 19 | when: component_created_by == 'component' 20 | when: enable_nginx == 'enable' 21 | 22 | - include_tasks: disable-nginx.yml 23 | when: 24 | - enable_nginx == 'disable' 25 | - component_created_by == 'component' 26 | when: ingress_controller_type == 'nginx' 27 | 28 | - block: 29 | - block: 30 | - include_tasks: enable-traefik.yml 31 | when: component_created_by == 'cluster' 32 | 33 | - include_tasks: "{{ item }}" 34 | with_items: 35 | - disable-traefik.yml 36 | - enable-traefik.yml 37 | when: component_created_by == 'component' 38 | when: enable_traefik == 'enable' 39 | 40 | - include_tasks: disable-traefik.yml 41 | when: 42 | - enable_traefik == 'disable' 43 | - component_created_by == 'component' 44 | when: ingress_controller_type == 'traefik' 45 | -------------------------------------------------------------------------------- /roles/plugins/istio/files/istio-base-1.1.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/istio/files/istio-base-1.1.0.tgz -------------------------------------------------------------------------------- /roles/plugins/istio/files/istio-discovery-1.2.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/istio/files/istio-discovery-1.2.0.tgz -------------------------------------------------------------------------------- /roles/plugins/istio/files/istio-egress-1.1.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/istio/files/istio-egress-1.1.0.tgz -------------------------------------------------------------------------------- /roles/plugins/istio/files/istio-ingress-1.1.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/istio/files/istio-ingress-1.1.0.tgz -------------------------------------------------------------------------------- /roles/plugins/istio/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - name: ISTIO 
| Delete istio 2 | command: "helm uninstall {{ item }} --namespace istio-system" 3 | with_items: 4 | - "istio-ingress" 5 | - "istio-egress" 6 | - "istio-pilot" 7 | - "istio-base" 8 | ignore_errors: true 9 | 10 | - name: ISTIO | Delete istio-system Namespace 11 | command: "{{ bin_dir }}/kubectl delete ns istio-system" 12 | ignore_errors: true -------------------------------------------------------------------------------- /roles/plugins/istio/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - name: Confirm Istio exist 2 | command: "{{ bin_dir }}/kubectl get ns -o name" 3 | register: istio_exist 4 | 5 | - name: Copy charts 6 | copy: src={{ item }} dest={{ base_dir }}/ mode=0644 7 | with_items: 8 | - "istio-base-1.1.0.tgz" 9 | - "istio-discovery-1.2.0.tgz" 10 | - "istio-ingress-1.1.0.tgz" 11 | - "istio-egress-1.1.0.tgz" 12 | 13 | - name: Create Namespace istio-system 14 | command: "{{ bin_dir }}/kubectl create ns istio-system" 15 | when: '"namespace/istio-system" not in istio_exist.stdout' 16 | 17 | - name: ISTIO | Create istio-base 18 | shell: "helm install istio-base -n istio-system {{ base_dir }}/istio-base-1.1.0.tgz -n istio-system --set global.istiod.enableAnalysis=true" 19 | 20 | - name: ISTIO | Create istio-discovery 21 | shell: "helm install istio-pilot -n istio-system {{ base_dir }}/istio-discovery-1.2.0.tgz -n istio-system \ 22 | --set pilot.image=registry.kubeoperator.io:8082/istio/pilot:1.11.8 \ 23 | --set pilot.resources.limits.cpu={{ pilot_limit_cpu }} \ 24 | --set pilot.resources.limits.memory={{ pilot_limit_memory }} \ 25 | --set pilot.resources.requests.cpu={{ pilot_requests_cpu }} \ 26 | --set pilot.resources.requests.memory={{ pilot_requests_memory }} \ 27 | --set pilot.traceSampling={{ pilot_trace_sampling }}" 28 | 29 | - name: ISTIO | Create istio-ingress 30 | shell: "helm install istio-ingress -n istio-system {{ base_dir }}/istio-ingress-1.1.0.tgz -n istio-system \ 31 | --set global.proxy.image=registry.kubeoperator.io:8082/istio/proxyv2:1.11.8 \ 32 | --set gateways.istio-ingressgateway.resources.limits.cpu={{ ingress_limit_cpu }} \ 33 | --set gateways.istio-ingressgateway.resources.limits.memory={{ ingress_limit_memory }} \ 34 | --set gateways.istio-ingressgateway.resources.requests.cpu={{ ingress_requests_cpu }} \ 35 | --set gateways.istio-ingressgateway.resources.requests.memory={{ ingress_requests_memory }} \ 36 | --set gateways.istio-ingressgateway.type={{ ingress_type }}" 37 | when: enable_istio_ingress == 'enable' 38 | 39 | - name: ISTIO | Create istio-egress 40 | shell: "helm install istio-egress -n istio-system {{ base_dir }}/istio-egress-1.1.0.tgz -n istio-system \ 41 | --set global.proxy.image=registry.kubeoperator.io:8082/istio/proxyv2:1.11.8 \ 42 | --set gateways.istio-egressgateway.resources.limits.cpu={{ egress_limit_cpu }} \ 43 | --set gateways.istio-egressgateway.resources.limits.memory={{ egress_limit_memory }} \ 44 | --set gateways.istio-egressgateway.resources.requests.cpu={{ egress_requests_cpu }} \ 45 | --set gateways.istio-egressgateway.resources.requests.memory={{ egress_requests_memory }}" 46 | when: enable_istio_egress == 'enable' -------------------------------------------------------------------------------- /roles/plugins/istio/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - include_tasks: enable.yml 3 | when: component_created_by == 'cluster' 4 | 5 | - include_tasks: enable.yml 6 | when: component_created_by == 
'component' 7 | when: enable_istio == 'enable' 8 | 9 | - include_tasks: enable.yml 10 | when: component_created_by == 'cluster' 11 | 12 | - include_tasks: disable.yml 13 | when: 14 | - component_created_by == 'component' 15 | - enable_istio == 'disable' 16 | -------------------------------------------------------------------------------- /roles/plugins/metallb/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true -------------------------------------------------------------------------------- /roles/plugins/metallb/files/metallb-0.13.7.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/metallb/files/metallb-0.13.7.tgz -------------------------------------------------------------------------------- /roles/plugins/metallb/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - name: Delete metallb L2Advertisement 2 | shell: "{{ bin_dir }}/kubectl -n kube-system delete L2Advertisement kubeoperator-l2-advertisement" 3 | ignore_errors: true 4 | 5 | - name: Delete metallb IPAddressPool 6 | shell: "{{ bin_dir }}/kubectl -n kube-system delete IPAddressPool kubeoperator-ip-address-pool" 7 | ignore_errors: true 8 | 9 | - name: Delete metallb 10 | command: "helm uninstall kubeoperator-metallb --namespace kube-system" 11 | ignore_errors: true 12 | -------------------------------------------------------------------------------- /roles/plugins/metallb/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - name: Create metallb directory 2 | file: 3 | path: /etc/kubernetes/plugins/metallb 4 | state: directory 5 | 6 | - name: Copy metallb charts 7 | copy: src=metallb-0.13.7.tgz dest=/etc/kubernetes/plugins/metallb/ mode=0644 8 | 9 | - name: Helm install metallb 10 | shell: > 11 | helm install kubeoperator-metallb --namespace kube-system \ 12 | --set controller.image.repository={{ dns_repository_hostname }}:{{ registry_port }}/metallb/controller \ 13 | --set controller.image.tag={{ metallb_version }} \ 14 | --set speaker.image.repository={{ dns_repository_hostname }}:{{ registry_port }}/metallb/speaker \ 15 | --set speaker.image.tag={{ metallb_version }} \ 16 | /etc/kubernetes/plugins/metallb/metallb-0.13.7.tgz 17 | 18 | - name: Waiting for metallb-controller to ready 19 | shell: "{{ bin_dir }}/kubectl -n kube-system get pod -o wide | grep metallb-controller | awk '{print $2}'" 20 | register: metallb_controller_status 21 | until: metallb_controller_status.stdout == "1/1" 22 | retries: 10 23 | delay: 10 24 | 25 | - name: Copy metallb file 26 | template: 27 | src: "{{ item }}.j2" 28 | dest: "/etc/kubernetes/plugins/metallb/{{ item }}" 29 | mode: 0644 30 | with_items: 31 | - metallb-config-IPAddressPool.yaml 32 | - metallb-config-L2Advertisement.yaml 33 | 34 | - name: Create metallb IPAddressPool 35 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/metallb/metallb-config-IPAddressPool.yaml" 36 | 37 | - name: Create metallb L2Advertisement 38 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/metallb/metallb-config-L2Advertisement.yaml" 39 | -------------------------------------------------------------------------------- /roles/plugins/metallb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: 
disable.yml 2 | when: enable_metallb == 'disable' 3 | 4 | - include_tasks: enable.yml 5 | when: enable_metallb == 'enable' 6 | -------------------------------------------------------------------------------- /roles/plugins/metallb/templates/metallb-config-IPAddressPool.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: kubeoperator-ip-address-pool 5 | namespace: kube-system 6 | spec: 7 | addresses: 8 | {% if metallb_cidrs is defined and metallb_cidrs %} 9 | {% for cidr in metallb_cidrs.split(',') %} 10 | - {{ cidr }} 11 | {% endfor %} 12 | {% endif %} 13 | {% if metallb_ip_ranges is defined and metallb_ip_ranges %} 14 | {% for ip in metallb_ip_ranges.split(',') %} 15 | - {{ ip }} 16 | {% endfor %} 17 | {% endif %} 18 | -------------------------------------------------------------------------------- /roles/plugins/metallb/templates/metallb-config-L2Advertisement.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: kubeoperator-l2-advertisement 5 | namespace: kube-system 6 | spec: 7 | ipAddressPools: 8 | - kubeoperator-ip-address-pool -------------------------------------------------------------------------------- /roles/plugins/metrics-server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true -------------------------------------------------------------------------------- /roles/plugins/metrics-server/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - name: Copy metrics-server file 2 | template: 3 | src: metrics-server.yaml.j2 4 | dest: /etc/kubernetes/plugins/metrics-server/metrics-server.yaml 5 | 6 | - name: Delete metrics-server 7 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/metrics-server/metrics-server.yaml" 8 | ignore_errors: true -------------------------------------------------------------------------------- /roles/plugins/metrics-server/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - name: Copy metrics-server file 2 | template: 3 | src: metrics-server.yaml.j2 4 | dest: /etc/kubernetes/plugins/metrics-server/metrics-server.yaml 5 | 6 | - name: Ensure that apiserver accesses the etcd cluster properly 7 | shell: "{{ bin_dir }}/kubectl get ns" 8 | register: etcd_status 9 | until: '"kube-system" in etcd_status.stdout' 10 | retries: 10 11 | delay: 6 12 | 13 | - name: Deploy metrics-server 14 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/metrics-server/metrics-server.yaml" -------------------------------------------------------------------------------- /roles/plugins/metrics-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create metrics-server directory 2 | file: 3 | path: /etc/kubernetes/plugins/metrics-server 4 | state: directory 5 | 6 | - block: 7 | - include_tasks: enable.yml 8 | when: component_created_by == 'cluster' 9 | 10 | - include_tasks: "{{ item }}" 11 | with_items: 12 | - disable.yml 13 | - enable.yml 14 | when: component_created_by == 'component' 15 | when: enable_metrics_server == 'enable' 16 | 17 | - include_tasks: disable.yml 18 | when: 19 | - enable_metrics_server == 'disable' 20 | - component_created_by == 'component' 21 |
-------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-deploy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | 4 | # 1.10+ admission plugins 5 | kube_apiserver_enable_admission_plugins: 6 | - NodeRestriction 7 | # - AlwaysPullImages 8 | # - PodSecurityPolicy 9 | 10 | # calico mtu 11 | calico_veth_mtu: 1440 12 | # 设置 Felix 日志级别(debug, info, warning, error) 13 | calico_felix_log_level: "warning" -------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-deploy/tasks/calico.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Calico | Copy calico crds file 3 | template: 4 | src: calico/crds.yaml.j2 5 | dest: /etc/kubernetes/plugins/network-plugin/crds.yaml 6 | 7 | - name: Calico | Copy calico typha file 8 | template: 9 | src: calico/calico-typha.yaml.j2 10 | dest: /etc/kubernetes/plugins/network-plugin/calico-typha.yaml 11 | when: calico_version == "v3.14.1" 12 | 13 | - block: 14 | - name: Calico | Copy calico crds file 15 | template: 16 | src: calico/crds-v3.16.yaml.j2 17 | dest: /etc/kubernetes/plugins/network-plugin/crds.yaml 18 | 19 | - name: Calico | Copy calico typha file 20 | template: 21 | src: calico/calico-typha-v3.16.yaml.j2 22 | dest: /etc/kubernetes/plugins/network-plugin/calico-typha.yaml 23 | when: calico_version == "v3.16.5" 24 | 25 | - block: 26 | - name: Calico | Copy calico crds file 27 | template: 28 | src: calico/crds-v3.18.yaml.j2 29 | dest: /etc/kubernetes/plugins/network-plugin/crds.yaml 30 | 31 | - name: Calico | Copy calico typha file 32 | template: 33 | src: calico/calico-typha-v3.18.yaml.j2 34 | dest: /etc/kubernetes/plugins/network-plugin/calico-typha.yaml 35 | when: calico_version == "v3.18.4" 36 | 37 | - block: 38 | - name: Calico | Copy calico crds file 39 | template: 40 | src: calico/crds-v3.21.yaml.j2 41 | dest: /etc/kubernetes/plugins/network-plugin/crds.yaml 42 | 43 | - name: Calico | Copy calico typha file 44 | template: 45 | src: calico/calico-typha-v3.21.yaml.j2 46 | dest: /etc/kubernetes/plugins/network-plugin/calico-typha.yaml 47 | when: calico_version == "v3.21.4" 48 | 49 | - name: Ensure that apiserver accesses the etcd cluster properly 50 | shell: "{{ bin_dir }}/kubectl get ns" 51 | register: etcd_status 52 | until: '"kube-system" in etcd_status.stdout' 53 | retries: 10 54 | delay: 6 55 | 56 | - name: Calico | Deploy calico crds 57 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/network-plugin/crds.yaml" 58 | 59 | - name: Ensure that apiserver accesses the etcd cluster properly 60 | shell: "{{ bin_dir }}/kubectl get ns" 61 | register: etcd_status 62 | until: '"kube-system" in etcd_status.stdout' 63 | retries: 10 64 | delay: 6 65 | 66 | - name: Calico | Deploy calico typha 67 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/network-plugin/calico-typha.yaml" 68 | 69 | - name: Download the calicoctl binary 70 | get_url: 71 | url: "{{ calicoctl_download_url }}" 72 | dest: "{{ cni_bin_dir }}" 73 | timeout: "{{ download_timeout_online }}" 74 | 75 | - name: Copy the calicoctl binary 76 | copy: 77 | src: "{{ cni_bin_dir}}/calicoctl-linux-{{ architectures }}" 78 | dest: "{{ bin_dir }}/calicoctl" 79 | remote_src: yes 80 | mode: "0755" 81 | -------------------------------------------------------------------------------- 
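The calico tasks above render and apply the CRDs and the calico-typha manifest and install calicoctl, but they do not verify that the calico-node pods actually reach a running state. A follow-up check could be sketched as below, using the same shell/until/retries pattern as the rest of this repository; the k8s-app=calico-node label is the standard upstream labeling and is an assumption here, as is the expectation that the DaemonSet has already been created when the check runs.

- name: Calico | Waiting for calico-node pods to run (illustrative sketch, not part of the role)
  shell: "{{ bin_dir }}/kubectl -n kube-system get pod -l k8s-app=calico-node --no-headers | grep -v Running | wc -l"
  register: calico_pending_pods
  until: calico_pending_pods.stdout == "0"
  retries: 30
  delay: 10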
/roles/plugins/network-plugins/network-deploy/tasks/cilium.yml: -------------------------------------------------------------------------------- 1 | - name: Cilium | Copy cilium file 2 | template: 3 | src: cilium/cilium.yaml.j2 4 | dest: /etc/kubernetes/plugins/network-plugin/cilium.yaml 5 | 6 | - name: Ensure that apiserver accesses the etcd cluster properly 7 | shell: "{{ bin_dir }}/kubectl get ns" 8 | register: etcd_status 9 | until: '"kube-system" in etcd_status.stdout' 10 | retries: 10 11 | delay: 6 12 | 13 | - name: Cilium | Deploy cilium | amd64 host 14 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/network-plugin/cilium.yaml" 15 | -------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-deploy/tasks/flannel.yml: -------------------------------------------------------------------------------- 1 | - name: Flannel | Copy flannel file 2 | template: 3 | src: flannel/kube-flannel.yaml.j2 4 | dest: /etc/kubernetes/plugins/network-plugin/kube-flannel.yaml 5 | 6 | - name: Ensure that apiserver accesses the etcd cluster properly 7 | shell: "{{ bin_dir }}/kubectl get ns" 8 | register: etcd_status 9 | until: '"kube-system" in etcd_status.stdout' 10 | retries: 10 11 | delay: 6 12 | 13 | - name: Flannel | Deploy flannel | amd64 host 14 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/network-plugin/kube-flannel.yaml" -------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-deploy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: flannel.yml 2 | when: network_plugin == 'flannel' 3 | 4 | - include_tasks: calico.yml 5 | when: network_plugin == 'calico' 6 | 7 | - include_tasks: cilium.yml 8 | when: network_plugin == 'cilium' -------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-prepare/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | 4 | # 1.10+ admission plugins 5 | kube_apiserver_enable_admission_plugins: 6 | - NodeRestriction 7 | # - AlwaysPullImages 8 | # - PodSecurityPolicy 9 | 10 | cni_bin_dir: "/opt/cni/bin" 11 | 12 | # calico mtu 13 | calico_veth_mtu: 1440 14 | # 设置 Felix 日志级别(debug, info, warning, error) 15 | calico_felix_log_level: "warning" -------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-prepare/tasks/cilium.yml: -------------------------------------------------------------------------------- 1 | - name: Converts kernel versions to floating point numbers 2 | set_fact: 3 | KERNEL_VER: "{{ ansible_kernel.split('-')[0].split('.')[0]|int + ansible_kernel.split('-')[0].split('.')[1]|int/100 }}" 4 | 5 | - name: Check the kernel version > 4.9 6 | fail: msg="kernel {{ ansible_kernel }} is too old for cilium installing" 7 | when: "KERNEL_VER|float <= 4.09" 8 | -------------------------------------------------------------------------------- /roles/plugins/network-plugins/network-prepare/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: CNI | Create cni directory 2 | file: name=/etc/cni/net.d state=directory 3 | 4 | - name: CNI | Create cni manifests directory 5 | file: name=/etc/kubernetes/plugins/network-plugin state=directory 6 | 7 | - name: CNI | Create cni directory 8 | file: name="{{ 
cni_bin_dir }}" state=directory 9 | 10 | - name: CNI | Create cni directory 11 | file: name="{{ base_dir }}/cni" state=directory 12 | 13 | - name: CNI | Download cni plugins 14 | get_url: 15 | validate_certs: no 16 | url: "{{ cni_download_url }}" 17 | dest: "{{ base_dir }}/cni" 18 | timeout: "{{ download_timeout_online }}" 19 | 20 | - name: CNI | Unarchive cni plugins 21 | unarchive: 22 | src: "{{ base_dir }}/cni/cni-plugins-linux-{{ architectures }}-{{ cni_version }}.tgz" 23 | dest: "{{ cni_bin_dir }}" 24 | remote_src: yes 25 | 26 | - include_tasks: cilium.yml 27 | when: network_plugin == 'cilium' 28 | -------------------------------------------------------------------------------- /roles/plugins/npd/defaults/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/plugins/npd/defaults/main.yml -------------------------------------------------------------------------------- /roles/plugins/npd/tasks/disable.yml: -------------------------------------------------------------------------------- 1 | - name: Copy npd config file 2 | template: 3 | src: npd-config.yaml.j2 4 | dest: /etc/kubernetes/plugins/npd/npd-config.yaml 5 | 6 | - name: Copy npd ds file 7 | template: 8 | src: npd-ds.yaml.j2 9 | dest: /etc/kubernetes/plugins/npd/npd-ds.yaml 10 | 11 | - name: Delete npd config 12 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/npd/npd-config.yaml" 13 | ignore_errors: true 14 | 15 | - name: Delete npd ds 16 | shell: "{{ bin_dir }}/kubectl delete -f /etc/kubernetes/plugins/npd/npd-ds.yaml" 17 | ignore_errors: true -------------------------------------------------------------------------------- /roles/plugins/npd/tasks/enable.yml: -------------------------------------------------------------------------------- 1 | - name: Copy npd config file 2 | template: 3 | src: npd-config.yaml.j2 4 | dest: /etc/kubernetes/plugins/npd/npd-config.yaml 5 | 6 | - name: Copy npd ds file 7 | template: 8 | src: npd-ds.yaml.j2 9 | dest: /etc/kubernetes/plugins/npd/npd-ds.yaml 10 | 11 | - name: Deploy npd config 12 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/npd/npd-config.yaml" 13 | 14 | - name: Deploy npd ds 15 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/plugins/npd/npd-ds.yaml" -------------------------------------------------------------------------------- /roles/plugins/npd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create npd directory 2 | file: 3 | path: /etc/kubernetes/plugins/npd 4 | state: directory 5 | 6 | - include_tasks: disable.yml 7 | when: enable_npd == 'disable' 8 | 9 | - include_tasks: enable.yml 10 | when: enable_npd == 'enable' -------------------------------------------------------------------------------- /roles/plugins/npd/templates/npd-ds.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: node-problem-detector 6 | namespace: kube-system 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: system:node-problem-detector 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - nodes 17 | verbs: 18 | - get 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - nodes/status 23 | verbs: 24 | - patch 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - events 29 | verbs: 30 | - create 31 | - 
patch 32 | - update 33 | --- 34 | kind: ClusterRoleBinding 35 | apiVersion: rbac.authorization.k8s.io/v1 36 | metadata: 37 | name: system:node-problem-detector 38 | roleRef: 39 | apiGroup: rbac.authorization.k8s.io 40 | kind: ClusterRole 41 | name: system:node-problem-detector 42 | subjects: 43 | - kind: ServiceAccount 44 | name: node-problem-detector 45 | namespace: kube-system 46 | --- 47 | apiVersion: apps/v1 48 | kind: DaemonSet 49 | metadata: 50 | name: node-problem-detector 51 | namespace: kube-system 52 | labels: 53 | app: node-problem-detector 54 | spec: 55 | selector: 56 | matchLabels: 57 | app: node-problem-detector 58 | template: 59 | metadata: 60 | labels: 61 | app: node-problem-detector 62 | spec: 63 | serviceAccountName: node-problem-detector 64 | containers: 65 | - name: node-problem-detector 66 | command: 67 | - /node-problem-detector 68 | - --logtostderr 69 | - --config.system-log-monitor=/config/abrt-adaptor.json,/config/docker-monitor.json,/config/kernel-monitor.json,/config/systemd-monitor.json 70 | image: {{ npd_image }} 71 | resources: 72 | limits: 73 | cpu: 10m 74 | memory: 80Mi 75 | requests: 76 | cpu: 10m 77 | memory: 80Mi 78 | imagePullPolicy: IfNotPresent 79 | securityContext: 80 | privileged: true 81 | env: 82 | - name: NODE_NAME 83 | valueFrom: 84 | fieldRef: 85 | fieldPath: spec.nodeName 86 | volumeMounts: 87 | - name: log 88 | mountPath: /var/log 89 | readOnly: true 90 | - name: kmsg 91 | mountPath: /dev/kmsg 92 | readOnly: true 93 | - name: localtime 94 | mountPath: /etc/localtime 95 | readOnly: true 96 | - name: config 97 | mountPath: /config 98 | readOnly: true 99 | volumes: 100 | - name: log 101 | hostPath: 102 | path: /var/log/ 103 | - name: kmsg 104 | hostPath: 105 | path: /dev/kmsg 106 | - name: localtime 107 | hostPath: 108 | path: /etc/localtime 109 | - name: config 110 | configMap: 111 | name: node-problem-detector-config 112 | items: 113 | - key: abrt-adaptor.json 114 | path: abrt-adaptor.json 115 | - key: docker-monitor.json 116 | path: docker-monitor.json 117 | - key: kernel-monitor.json 118 | path: kernel-monitor.json 119 | - key: systemd-monitor.json 120 | path: systemd-monitor.json -------------------------------------------------------------------------------- /roles/post/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true -------------------------------------------------------------------------------- /roles/post/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Ensure that apiserver accesses the etcd cluster properly 3 | shell: "{{ bin_dir }}/kubectl get ns" 4 | register: etcd_status 5 | until: '"kube-system" in etcd_status.stdout' 6 | retries: 10 7 | delay: 6 8 | 9 | - name: Get cluster node info 10 | shell: "{{ bin_dir }}/kubectl get node -o name" 11 | register: kubectl_get_node_output 12 | run_once: true 13 | 14 | - block: 15 | - name: Cancel the node label 16 | shell: > 17 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/control-plane- && 18 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/master- && 19 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/worker- && 20 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/etcd- 21 | ignore_errors: true 22 | 23 | - name: Set the master label 24 | shell: > 25 | {{ bin_dir }}/kubectl label node {{ 
inventory_hostname }} node-role.kubernetes.io/control-plane='' --overwrite && 26 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/master='' --overwrite 27 | when: inventory_hostname in groups['kube-master'] 28 | 29 | - name: Set master to unschedulable 30 | shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}" 31 | when: 32 | - master_schedule_type == "disable" 33 | - inventory_hostname in groups['kube-master'] 34 | 35 | - name: Set the worker label 36 | shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/worker='' --overwrite" 37 | when: inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 38 | 39 | - name: Set the etcd label 40 | shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/etcd='' --overwrite" 41 | when: inventory_hostname in groups['etcd'] 42 | delegate_to: "{{ groups['kube-master'][0] }}" 43 | when: inventory_hostname in kubectl_get_node_output.stdout 44 | 45 | - block: 46 | - name: Copy serviceAccount file 47 | template: 48 | src: ko-admin.yaml.j2 49 | dest: /etc/kubernetes/ko-admin.yaml 50 | 51 | - name: Ensure that apiserver accesses the etcd cluster properly 52 | shell: "{{ bin_dir }}/kubectl get ns" 53 | register: etcd_status_1 54 | until: '"kube-system" in etcd_status_1.stdout' 55 | retries: 10 56 | delay: 6 57 | 58 | - name: Deploy serviceAccount 59 | shell: "{{ bin_dir }}/kubectl apply -f /etc/kubernetes/ko-admin.yaml" 60 | 61 | - name: Ensure that apiserver accesses the etcd cluster properly 62 | shell: "{{ bin_dir }}/kubectl get ns" 63 | register: etcd_status_2 64 | until: '"kube-system" in etcd_status_2.stdout' 65 | retries: 10 66 | delay: 6 67 | 68 | - name: Ensure kube-operator namespace 69 | shell: " {{ bin_dir }}/kubectl get namespaces | grep kube-operator || {{ bin_dir }}/kubectl create namespace kube-operator" 70 | run_once: true 71 | delegate_to: "{{ groups['kube-master'][0] }}" -------------------------------------------------------------------------------- /roles/post/templates/ko-admin.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: ko-admin 6 | namespace: kube-system 7 | --- 8 | kind: ClusterRoleBinding 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: ko-admin 12 | subjects: 13 | - kind: ServiceAccount 14 | name: ko-admin 15 | namespace: kube-system 16 | roleRef: 17 | kind: ClusterRole 18 | name: cluster-admin 19 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /roles/prepare/base/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | command_warnings: false 4 | # 当前节点ip 5 | CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ inventory_hostname }}{% endif %}" -------------------------------------------------------------------------------- /roles/prepare/base/tasks/centos.yml: -------------------------------------------------------------------------------- 1 | - name: Status firewalld 2 | shell: > 3 | systemctl status firewalld | grep active || echo "not be found" 4 | register: firewalld_already_installed 5 | 6 | - name: Disable firewalld 7 | service: 8 | name: firewalld 9 | state: stopped 10 | enabled: no 11 | when: '"active" in 
firewalld_already_installed.stdout' 12 | ignore_errors: true 13 | 14 | - name: Install base rpm package 15 | yum: 16 | name: 17 | - conntrack-tools 18 | - bash-completion 19 | - libseccomp 20 | - psmisc 21 | - rsync 22 | - socat 23 | - sshpass 24 | state: latest 25 | 26 | - name: Install ipvs rpm package 27 | yum: 28 | name: 29 | - ipset 30 | - ipvsadm 31 | state: latest 32 | when: kube_proxy_mode == "ipvs" 33 | 34 | - name: Temp stop selinux 35 | shell: "setenforce 0" 36 | failed_when: false 37 | 38 | - name: Disable selinux 39 | lineinfile: 40 | dest: /etc/selinux/config 41 | regexp: "^SELINUX=" 42 | line: "SELINUX=disabled" 43 | 44 | - name: Disable rsyslog Get journald log 1 45 | lineinfile: 46 | dest: /etc/rsyslog.conf 47 | state: present 48 | regexp: 'ModLoad imjournal' 49 | line: '#$ModLoad imjournal # provides access to the systemd journal' 50 | ignore_errors: true 51 | 52 | - name: Disable rsyslog Get journald log 2 53 | lineinfile: 54 | dest: /etc/rsyslog.conf 55 | state: present 56 | regexp: 'IMJournalStateFile' 57 | line: '#$IMJournalStateFile imjournal.state' 58 | ignore_errors: true 59 | 60 | - name: Restart rsyslog service 61 | service: 62 | name: rsyslog 63 | state: restarted 64 | ignore_errors: true -------------------------------------------------------------------------------- /roles/prepare/base/tasks/common.yml: -------------------------------------------------------------------------------- 1 | - name: Disable swap 2 | shell: "swapoff -a && sysctl -w vm.swappiness=0" 3 | ignore_errors: true 4 | 5 | - name: Delete fstab swap config 6 | lineinfile: 7 | path: /etc/fstab 8 | regexp: 'swap' 9 | state: absent 10 | backup: yes 11 | 12 | - name: Load kernel module 13 | modprobe: 14 | name: "{{ item }}" 15 | state: present 16 | with_items: 17 | - sunrpc 18 | - ip_vs 19 | - ip_vs_rr 20 | - ip_vs_sh 21 | - ip_vs_wrr 22 | - br_netfilter 23 | ignore_errors: true 24 | 25 | - name: Load nf_conntrack for kernel < 4.19 26 | modprobe: 27 | name: nf_conntrack_ipv4 28 | state: present 29 | when: ansible_kernel is version('4.19', '<') 30 | ignore_errors: true 31 | 32 | - name: Load nf_conntrack for kernel >= 4.19 33 | modprobe: 34 | name: nf_conntrack 35 | state: present 36 | when: ansible_kernel is version('4.19', '>=') 37 | ignore_errors: true 38 | 39 | - name: Setup systemd-modules-load config 40 | template: 41 | src: 10-k8s-modules.conf.j2 42 | dest: /etc/modules-load.d/10-k8s-modules.conf 43 | 44 | - name: Restart systemd-modules-load 45 | service: 46 | name: systemd-modules-load 47 | state: restarted 48 | enabled: yes 49 | 50 | - name: Set system parameters 51 | template: 52 | src: 99-sysctl-ko.conf.j2 53 | dest: /etc/sysctl.d/99-sysctl-ko.conf 54 | 55 | - name: Effective system parameters 56 | shell: "sysctl -p /etc/sysctl.d/99-sysctl-ko.conf" 57 | ignore_errors: true 58 | 59 | - name: Check whether the sysctl.conf exists 60 | stat: 61 | path: /etc/sysctl.conf 62 | register: sysctl_conf_stat 63 | 64 | - name: Delete the default ip_forward parameter 65 | lineinfile: 66 | dest: /etc/sysctl.conf 67 | regexp: "^net.ipv4.ip_forward" 68 | state: absent 69 | when: sysctl_conf_stat.stat.isreg is defined 70 | 71 | - name: Create systemd directory 72 | file: 73 | name: /etc/systemd/system.conf.d 74 | state: directory 75 | 76 | - name: Setup system ulimits 77 | template: 78 | src: 30-k8s-ulimits.conf.j2 79 | dest: /etc/systemd/system.conf.d/30-k8s-ulimits.conf 80 | 81 | - name: Setup hostname 82 | shell: "hostnamectl set-hostname {{ inventory_hostname }}" 83 | when: cluster_name_style == 
"hostname" 84 | 85 | - name: Change the default Python version 86 | shell: "rm -rf /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python" 87 | when: ansible_distribution in [ 'Kylin Linux Advanced Server','Kylin' ] 88 | ignore_errors: true -------------------------------------------------------------------------------- /roles/prepare/base/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | - name: Run "apt-get update" 2 | shell: "sudo rm -rf /var/lib/apt/lists/* && sudo /usr/bin/apt-get update -y" 3 | ignore_errors: true 4 | 5 | - name: Install Python-apt ('Ubuntu Magjor Version' = 18) 6 | shell: "sudo /usr/bin/apt-get install python-apt -y" 7 | when: 8 | - ansible_distribution == "Ubuntu" 9 | - ansible_distribution_release == "bionic" 10 | ignore_errors: true 11 | 12 | - name: Status firewalld 13 | shell: > 14 | systemctl status ufw | grep active || echo "not be found" 15 | register: ufw_already_installed 16 | 17 | - name: Debian | Disable UFW 18 | service: 19 | name: ufw 20 | state: stopped 21 | enabled: no 22 | when: '"active" in ufw_already_installed.stdout' 23 | ignore_errors: true 24 | 25 | - name: Debian | Install base package 26 | apt: 27 | name: 28 | - conntrack 29 | - bash-completion 30 | - libseccomp2 31 | - psmisc 32 | - rsync 33 | - socat 34 | - sshpass 35 | state: latest 36 | 37 | - name: Debian | Install ipvs package 38 | apt: 39 | name: 40 | - ipset 41 | - ipvsadm 42 | state: latest 43 | when: kube_proxy_mode == "ipvs" 44 | 45 | - name: Disable rsyslog Get journald log 1 46 | lineinfile: 47 | dest: /etc/rsyslog.conf 48 | state: present 49 | regexp: 'ModLoad imjournal' 50 | line: '#$ModLoad imjournal # provides access to the systemd journal' 51 | 52 | - name: Disable rsyslog Get journald log 2 53 | lineinfile: 54 | dest: /etc/rsyslog.conf 55 | state: present 56 | regexp: 'IMJournalStateFile' 57 | line: '#$IMJournalStateFile imjournal.state' 58 | 59 | - name: Restart rsyslog service 60 | service: 61 | name: rsyslog 62 | state: restarted 63 | ignore_errors: true 64 | 65 | - name: Restarting Cron avoids job confusion caused by changing the time zone 66 | service: 67 | name: cron 68 | state: restarted 69 | enabled: yes -------------------------------------------------------------------------------- /roles/prepare/base/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # 公共系统参数设置 2 | - include_tasks: common.yml 3 | 4 | # CentOS 系列|系统基础软件环境 5 | - include_tasks: centos.yml 6 | when: ansible_distribution in [ 'CentOS','RedHat','EulerOS','openEuler','Kylin Linux Advanced Server' ] 7 | 8 | # Debian 系列|系统基础软件环境 9 | - include_tasks: debian.yml 10 | when: ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] 11 | 12 | - name: Prepare some directory 13 | file: 14 | name: "{{ item }}" 15 | state: directory 16 | with_items: 17 | - "{{ bin_dir }}" 18 | - "{{ ansible_env.PWD }}/.kube" 19 | - "{{ ansible_env.HOME }}/.kube" -------------------------------------------------------------------------------- /roles/prepare/base/templates/10-k8s-modules.conf.j2: -------------------------------------------------------------------------------- 1 | sunrpc 2 | ip_vs 3 | ip_vs_rr 4 | ip_vs_wrr 5 | ip_vs_sh 6 | br_netfilter 7 | {% if ansible_kernel is version('4.19', '>=') %} 8 | nf_conntrack 9 | {% else %} 10 | nf_conntrack_ipv4 11 | {% endif %} -------------------------------------------------------------------------------- /roles/prepare/base/templates/30-k8s-ulimits.conf.j2: 
-------------------------------------------------------------------------------- 1 | [Manager] 2 | DefaultLimitCORE=infinity 3 | DefaultLimitNOFILE=100000 4 | DefaultLimitNPROC=100000 -------------------------------------------------------------------------------- /roles/prepare/base/templates/sunrpc.conf.j2: -------------------------------------------------------------------------------- 1 | options nf_conntrack hashsize=131072 2 | options sunrpc tcp_slot_table_entries=128 3 | options sunrpc tcp_max_slot_table_entries=128 -------------------------------------------------------------------------------- /roles/prepare/containerd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 启用容器仓库镜像 2 | ENABLE_MIRROR_REGISTRY: true -------------------------------------------------------------------------------- /roles/prepare/containerd/templates/containerd.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=containerd container runtime 3 | Documentation=https://containerd.io 4 | After=network.target 5 | 6 | [Service] 7 | Environment="PATH={{ bin_dir }}:/bin:/sbin:/usr/bin:/usr/sbin" 8 | ExecStart={{ bin_dir }}/containerd 9 | Restart=always 10 | RestartSec=5 11 | Delegate=yes 12 | KillMode=process 13 | OOMScoreAdjust=-999 14 | LimitNOFILE=1048576 15 | LimitNPROC=1048576 16 | LimitCORE=1048576 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /roles/prepare/containerd/templates/crictl.yaml.j2: -------------------------------------------------------------------------------- 1 | runtime-endpoint: unix:///run/containerd/containerd.sock 2 | -------------------------------------------------------------------------------- /roles/prepare/docker/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | # 信任的不安全镜像库地址,默认为 Pod 和 Service 网段 2 | docker_insecure_registries: 3 | - "{{ registry_hostname }}:{{ registry_port }}" 4 | - "{{ registry_hostname }}:{{ registry_hosted_port }}" 5 | - "{{ dns_repository_hostname }}:{{ registry_hosted_port }}" 6 | - "{{ dns_repository_hostname }}:{{ registry_port }}" 7 | - "{{ kube_pod_subnet }}" 8 | - "{{ kube_service_subnet }}" 9 | 10 | # docker日志相关 11 | docker_log_driver: "json-file" 12 | docker_log_level: "warn" 13 | docker_log_max_size: "10m" 14 | docker_log_max_file: 3 15 | 16 | # 并行镜像下载数量 17 | docker_max_concurrent_downloads: 10 -------------------------------------------------------------------------------- /roles/prepare/docker/files/docker-tag: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | 4 | USER="admin" 5 | PASS="XXXXXXXXXXXXXXXXXX" 6 | HURL="https://{{ HARBOR_DOMAIN }}" 7 | MTAG=$2 8 | CONTAIN=$3 9 | 10 | function usage() { 11 | cat << HELP 12 | 13 | docker-tag -- list all tags for a Docker image on a remote registry 14 | 15 | EXAMPLE: 16 | - list all tags for ubuntu: 17 | docker-tag tags ubuntu 18 | 19 | - list all php tags containing apache: 20 | docker-tag tags php apache 21 | 22 | - list all images of harbor: 23 | docker-tag get_images 24 | 25 | - list all tags for harbor redis: 26 | docker-tag get_tags redis/redis 27 | 28 | HELP 29 | } 30 | 31 | if [ $# -lt 1 ]; then 32 | usage 33 | exit 2 34 | fi 35 | 36 | function tags() { 37 | TAGS=$(curl -ksL https://registry.hub.docker.com/v1/repositories/${MTAG}/tags | sed -e 's/[][]//g' -e 's/"//g' -e 's/ 
//g' | tr '}' '\n' | awk -F: '{print $3}') 38 | if [ "${CONTAIN}" != "" ]; then 39 | echo -e $(echo "${TAGS}" | grep "${CONTAIN}") | tr ' ' '\n' 40 | else 41 | echo "${TAGS}" 42 | fi 43 | } 44 | 45 | function get_images() { 46 | RTOKEN=$(curl -k -s -u ${USER}:${PASS} ${HURL}/service/token?account=${USER}\&service=harbor-registry\&scope=registry:catalog:* | grep "token" | awk -F '"' '{print $4}') 47 | RLIST=$(curl -k -s -H "authorization: bearer ${RTOKEN} " ${HURL}/v2/_catalog | awk -F '[' '{print $2}'|awk -F ']' '{print $1}' | sed 's/"//g') 48 | echo ${RLIST} | tr ',' '\n' 49 | } 50 | 51 | function get_tags() { 52 | TTOKEN=$(curl -iksL -X GET -u ${USER}:${PASS} ${HURL}/service/token?account=${USER}\&service=harbor-registry\&scope=repository:${MTAG}:pull | grep "token" | awk -F '"' '{print $4}') 53 | TLIST=$(curl -ksL -X GET -H "Content-Type: application/json" -H "Authorization: Bearer ${TTOKEN}" ${HURL}/v2/${MTAG}/tags/list| awk -F '[' '{print $2}' | awk -F ']' '{print $1}' | sed 's/"//g') 54 | echo ${TLIST} | tr ',' '\n' 55 | } 56 | 57 | case $1 in 58 | get_images) 59 | get_images 60 | ;; 61 | get_tags) 62 | get_tags 63 | ;; 64 | tags) 65 | tags 66 | ;; 67 | *) 68 | usage 69 | ;; 70 | esac 71 | -------------------------------------------------------------------------------- /roles/prepare/docker/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Get whether Containerd has been installed 2 | shell: 'systemctl status containerd|grep Active || echo "NOT FOUND"' 3 | register: containerd_status 4 | 5 | - name: Error message 6 | fail: msg="Containerd already installed!" 7 | when: '"running" in containerd_status.stdout' 8 | 9 | - name: Prepare docker directory 10 | file: name={{ item }} state=directory 11 | with_items: 12 | - /etc/docker 13 | 14 | - name: Create base directory 15 | file: name={{ item }} state=directory 16 | with_items: 17 | - "{{ base_dir }}" 18 | 19 | - name: Download the Docker binaries 20 | get_url: 21 | validate_certs: no 22 | url: "{{ docker_download_url }}" 23 | dest: "{{ base_dir }}/" 24 | timeout: "{{ download_timeout_online }}" 25 | tags: upgrade 26 | 27 | - name: Unarchive the Docker binaries 28 | unarchive: 29 | src: "{{ base_dir }}/docker-{{ docker_version }}.tgz" 30 | dest: "{{ base_dir }}/" 31 | remote_src: yes 32 | tags: upgrade 33 | 34 | - name: Converts Docker version information to a floating point number 35 | set_fact: 36 | DOCKER_VER: "{{ docker_version.split('.')[0]|int + docker_version.split('.')[1]|int/100 }}" 37 | connection: local 38 | run_once: true 39 | tags: upgrade 40 | 41 | - name: Copy the docker file to env(>= 18.09.x) 42 | copy: 43 | src: "{{ base_dir}}/docker/{{ item }}" 44 | dest: "{{ bin_dir }}/" 45 | remote_src: yes 46 | mode: "0755" 47 | with_items: 48 | - containerd 49 | - containerd-shim 50 | - docker-init 51 | - runc 52 | - docker 53 | - ctr 54 | - dockerd 55 | - docker-proxy 56 | tags: upgrade 57 | when: "DOCKER_VER|float >= 18.09" 58 | 59 | - name: Copy the docker file to env(>= 20.10.x) 60 | copy: 61 | src: "{{ base_dir}}/docker/{{ item }}" 62 | dest: "{{ bin_dir }}/" 63 | remote_src: yes 64 | mode: "0755" 65 | with_items: 66 | - containerd-shim-runc-v2 67 | tags: upgrade 68 | when: "DOCKER_VER|float >= 20.10" 69 | 70 | - name: Setup docker command completion 71 | copy: src=docker dest=/etc/bash_completion.d/docker mode=0644 72 | ignore_errors: true 73 | 74 | - name: Copy docker daemon file 75 | template: src=daemon.json.j2 dest=/etc/docker/daemon.json 76 | 77 | - name: 
flush-iptables 78 | shell: "iptables -P INPUT ACCEPT \ 79 | && iptables -F && iptables -X \ 80 | && iptables -F -t nat && iptables -X -t nat \ 81 | && iptables -F -t raw && iptables -X -t raw \ 82 | && iptables -F -t mangle && iptables -X -t mangle" 83 | 84 | - name: Create docker systemd unit file 85 | template: src=docker.service.j2 dest=/etc/systemd/system/docker.service 86 | tags: upgrade 87 | 88 | - name: Enable docker service 89 | shell: systemctl enable docker 90 | ignore_errors: true 91 | 92 | - name: Start docker service 93 | shell: systemctl daemon-reload && systemctl restart docker 94 | tags: upgrade 95 | 96 | - name: Waiting for docker to running 97 | shell: "systemctl status docker.service|grep Active" 98 | register: docker_status 99 | until: '"running" in docker_status.stdout' 100 | retries: 8 101 | delay: 2 102 | tags: upgrade 103 | -------------------------------------------------------------------------------- /roles/prepare/docker/templates/daemon.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | {% if docker_mirror_registry == "enable" %} 3 | "registry-mirrors": [ 4 | "https://docker.mirrors.ustc.edu.cn", 5 | "https://reg-mirror.qiniu.com", 6 | "https://hub-mirror.c.163.com" 7 | ], 8 | {% endif %} 9 | {% if docker_remote_api == "enable" %} 10 | "hosts": ["tcp://0.0.0.0:2376", "unix:///var/run/docker.sock"], 11 | {% endif %} 12 | {% if docker_insecure_registries is defined and docker_insecure_registries != None %} 13 | "insecure-registries": [{% for registry in docker_insecure_registries %}"{{ registry }}"{% if not loop.last %},{% endif %}{% endfor %}], 14 | {% endif %} 15 | "max-concurrent-downloads": {{ docker_max_concurrent_downloads | int }}, 16 | "log-driver": "{{ docker_log_driver }}", 17 | "log-level": "{{ docker_log_level }}", 18 | "log-opts": { 19 | "max-size": "{{ docker_log_max_size }}", 20 | "max-file": "{{ docker_log_max_file }}" 21 | }, 22 | "live-restore": true, 23 | "bip": "{{ docker_subnet }}", 24 | "data-root": "{{ docker_storage_dir }}", 25 | "exec-opts": ["native.cgroupdriver={{ cgroup_driver }}"] 26 | } -------------------------------------------------------------------------------- /roles/prepare/docker/templates/docker.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Application Container Engine 3 | Documentation=http://docs.docker.io 4 | 5 | [Service] 6 | Environment="PATH={{ bin_dir }}:/bin:/sbin:/usr/bin:/usr/sbin" 7 | ExecStart={{ bin_dir }}/dockerd 8 | ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT 9 | ExecReload=/bin/kill -s HUP $MAINPID 10 | Restart=always 11 | RestartSec=5 12 | LimitNOFILE=1048576 13 | LimitNPROC=1048576 14 | LimitCORE=1048576 15 | Delegate=yes 16 | KillMode=process 17 | 18 | [Install] 19 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/prepare/etcd-certificates/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true -------------------------------------------------------------------------------- /roles/prepare/etcd-certificates/tasks/certs_stat.yml: -------------------------------------------------------------------------------- 1 | # 根据stat信息判断是否已经生成过edcd证书,如果没有,下一步生成证书 2 | # 如果已经有etcd证书,为了保证整个安装的幂等性,跳过证书生成的步骤 3 | - name: Read etcd-ca certificate private key stat info 4 | stat: 5 | path: /etc/kubernetes/pki/etcd/ca.key 6 | register: 
etcd_ca_key_stat 7 | 8 | - name: Read etcd-ca root certificate stat info 9 | stat: 10 | path: /etc/kubernetes/pki/etcd/ca.crt 11 | register: etcd_ca_crt_stat 12 | 13 | - name: Read healthcheck-client certificate private key stat info 14 | stat: 15 | path: /etc/kubernetes/pki/etcd/healthcheck-client.key 16 | register: etcd_healthcheck_client_key_stat 17 | 18 | - name: Read server certificate private key stat info 19 | stat: 20 | path: /etc/kubernetes/pki/etcd/server.key 21 | register: etcd_server_key_stat 22 | 23 | - name: Read peer certificate private key stat info 24 | stat: 25 | path: /etc/kubernetes/pki/etcd/peer.key 26 | register: etcd_peer_key_stat 27 | 28 | - name: Read apiserver-etcd-client certificate private key stat info 29 | stat: 30 | path: /etc/kubernetes/pki/apiserver-etcd-client.key 31 | register: apiserver_etcd_client_key_stat -------------------------------------------------------------------------------- /roles/prepare/etcd-certificates/tasks/distribute.yml: -------------------------------------------------------------------------------- 1 | # 分发证书 2 | - name: Get etcd certificate 3 | slurp: 4 | src: /etc/kubernetes/pki/etcd/{{ item }} 5 | with_items: 6 | - ca.crt 7 | - ca.key 8 | - healthcheck-client.crt 9 | - healthcheck-client.key 10 | - peer.crt 11 | - peer.key 12 | - server.crt 13 | - server.key 14 | register: etcd_certs 15 | delegate_to: "{{ groups['etcd'][0] }}" 16 | run_once: true 17 | 18 | - name: Distribute etcd certificate to etcd node 19 | copy: 20 | dest: "{{ item.source }}" 21 | content: "{{ item.content | b64decode }}" 22 | owner: root 23 | group: root 24 | mode: 0644 25 | no_log: true 26 | with_items: "{{ etcd_certs.results }}" 27 | when: 28 | - inventory_hostname != groups['etcd'][0] 29 | - inventory_hostname in groups['etcd'] 30 | 31 | - name: Get apiserver etcd client certificate 32 | slurp: 33 | src: /etc/kubernetes/pki/{{ item }} 34 | with_items: 35 | - etcd/ca.crt 36 | - apiserver-etcd-client.crt 37 | - apiserver-etcd-client.key 38 | register: etcd_client_certs 39 | delegate_to: "{{ groups['etcd'][0] }}" 40 | run_once: true 41 | 42 | - name: Distribute apiserver etcd client certificate to master node 43 | copy: 44 | dest: "{{ item.source }}" 45 | content: "{{ item.content | b64decode }}" 46 | owner: root 47 | group: root 48 | mode: 0644 49 | no_log: true 50 | with_items: "{{ etcd_client_certs.results }}" 51 | when: 52 | - inventory_hostname != groups['etcd'][0] 53 | - inventory_hostname in groups['kube-master'] -------------------------------------------------------------------------------- /roles/prepare/etcd-certificates/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: On etcd node Create etcd directory 2 | file: 3 | name: /etc/kubernetes/pki/etcd 4 | state: directory 5 | when: inventory_hostname in groups['etcd'] 6 | 7 | - name: On master node create etcd certificate directory 8 | file: 9 | name: /etc/kubernetes/pki/etcd 10 | state: directory 11 | when: inventory_hostname in groups['kube-master'] 12 | 13 | - block: 14 | # 获取密钥状态 15 | - include_tasks: certs_stat.yml 16 | # 生成证书 17 | - include_tasks: generate.yml 18 | when: inventory_hostname == groups['etcd'][0] 19 | 20 | # 分发证书 21 | - include_tasks: distribute.yml -------------------------------------------------------------------------------- /roles/prepare/etcd-certificates/templates/etcd-openssl.cnf.j2: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | default_md = 
sha256 4 | distinguished_name = req_distinguished_name 5 | 6 | [req_distinguished_name] 7 | 8 | [ v3_ca ] 9 | basicConstraints = critical, CA:TRUE 10 | keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign 11 | 12 | [ v3_req_server ] 13 | basicConstraints = CA:FALSE 14 | keyUsage = critical, digitalSignature, keyEncipherment 15 | extendedKeyUsage = serverAuth 16 | 17 | [ v3_req_client ] 18 | basicConstraints = CA:FALSE 19 | keyUsage = critical, digitalSignature, keyEncipherment 20 | extendedKeyUsage = clientAuth 21 | 22 | [ v3_req_peer ] 23 | basicConstraints = CA:FALSE 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = serverAuth, clientAuth 26 | subjectAltName = @alt_names_etcd 27 | 28 | [ alt_names_etcd ] 29 | DNS.1 = localhost 30 | {% set dns_idx = 1 | int %} 31 | {% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %} 32 | {% for host in (groups['etcd']|unique) %} 33 | DNS.{{ dns_idx + loop.index }} = {% if hostvars[host]['ansible_ssh_host'] is defined %}{{ host }}{% endif %} 34 | 35 | {% endfor %} 36 | {% endif %} 37 | IP.1 = 127.0.0.1 38 | IP.2 = 0:0:0:0:0:0:0:1 39 | {% set ip_idx = 2 | int %} 40 | {% for host in (groups['etcd']|unique) %} 41 | IP.{{ ip_idx + loop.index }} = {% if hostvars[host]['ansible_ssh_host'] is defined %}{{ hostvars[host]['ansible_ssh_host'] }}{% else %}{{ host }}{% endif %} 42 | 43 | {% endfor %} -------------------------------------------------------------------------------- /roles/prepare/kube-certificates/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | 4 | KUBERNETES_SERVICE_IP: "{{ kube_service_subnet | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" -------------------------------------------------------------------------------- /roles/prepare/kube-certificates/tasks/certs_stat.yml: -------------------------------------------------------------------------------- 1 | # 根据statinfo判断是否已经生成过kubernetescertificate,如果没有,下一步生成certificate 2 | # 如果已经有kubernetescertificate,为了保证整个安装的幂等性,跳过certificate生成的步骤 3 | - name: Read kubernetes-ca root certificate private key stat info 4 | stat: 5 | path: /etc/kubernetes/pki/ca.key 6 | register: ca_key_stat 7 | 8 | - name: Read kubernetes-ca root certificate stat info 9 | stat: 10 | path: /etc/kubernetes/pki/ca.crt 11 | register: ca_crt_stat 12 | 13 | - name: Read front-proxy-ca root certificate private key stat info 14 | stat: 15 | path: /etc/kubernetes/pki/front-proxy-ca.key 16 | register: front_proxy_ca_key_stat 17 | 18 | - name: Read front-proxy-ca root certificate stat info 19 | stat: 20 | path: /etc/kubernetes/pki/front-proxy-ca.crt 21 | register: front_proxy_ca_crt_stat 22 | 23 | - name: Read apiserver certificate private key stat info 24 | stat: 25 | path: /etc/kubernetes/pki/apiserver.key 26 | register: apiserver_key_stat 27 | 28 | - name: Read apiserver-kubelet-client certificate private key stat info 29 | stat: 30 | path: /etc/kubernetes/pki/apiserver-kubelet-client.key 31 | register: apiserver_kubelet_client_key_stat 32 | 33 | - name: Read front-proxy-client certificate private key stat info 34 | stat: 35 | path: /etc/kubernetes/pki/front-proxy-client.key 36 | register: front_proxy_client_key_stat 37 | 38 | - name: Read kube-scheduler certificate private key stat info 39 | stat: 40 | path: /etc/kubernetes/pki/kube-scheduler.key 41 | register: kube_scheduler_key_stat 42 | 43 | - name: Read sa certificate private key stat info 44 | stat: 45 | path: 
/etc/kubernetes/pki/sa.key 46 | register: sa_key_stat 47 | 48 | - name: Read sa certificate public key stat info 49 | stat: 50 | path: /etc/kubernetes/pki/sa.pub 51 | register: sa_pud_stat 52 | 53 | - name: Read admin certificate private key stat info 54 | stat: 55 | path: /etc/kubernetes/pki/admin.key 56 | register: admin_key_stat 57 | 58 | - name: Read kubelet certificate private key stat info 59 | stat: 60 | path: /etc/kubernetes/pki/kubelet.key 61 | register: kubelet_key_stat -------------------------------------------------------------------------------- /roles/prepare/kube-certificates/tasks/distribute.yml: -------------------------------------------------------------------------------- 1 | # 分发certificate 2 | - name: Get kubernetes master node certificate 3 | slurp: 4 | src: /etc/kubernetes/pki/{{ item }} 5 | with_items: 6 | - admin.crt 7 | - admin.key 8 | - apiserver.crt 9 | - apiserver.key 10 | - apiserver-kubelet-client.crt 11 | - apiserver-kubelet-client.key 12 | - ca.crt 13 | - ca.key 14 | - front-proxy-ca.crt 15 | - front-proxy-ca.key 16 | - front-proxy-client.crt 17 | - front-proxy-client.key 18 | - kube-controller-manager.crt 19 | - kube-scheduler.crt 20 | - kube-scheduler.key 21 | - sa.key 22 | - sa.pub 23 | register: kubernetes_master_certs 24 | run_once: true 25 | delegate_to: "{{ groups['kube-master'][0] }}" 26 | 27 | - name: Distribute kubernetes master certificate to master node 28 | copy: 29 | dest: "{{ item.source }}" 30 | content: "{{ item.content | b64decode }}" 31 | owner: root 32 | group: root 33 | mode: 0644 34 | no_log: true 35 | with_items: "{{ kubernetes_master_certs.results }}" 36 | when: 37 | - inventory_hostname != groups['kube-master'][0] 38 | - inventory_hostname in groups['kube-master'] 39 | 40 | - name: Get kubelet service certificate 41 | slurp: 42 | src: /var/lib/kubelet/pki/{{ item }} 43 | with_items: 44 | - kubelet.crt 45 | - kubelet.key 46 | register: kubelet_certs 47 | run_once: true 48 | delegate_to: "{{ groups['kube-master'][0] }}" 49 | 50 | - name: Distribute kubelet service certificate to all nodes 51 | copy: 52 | dest: "{{ item.source }}" 53 | content: "{{ item.content | b64decode }}" 54 | owner: root 55 | group: root 56 | mode: 0644 57 | no_log: true 58 | with_items: "{{ kubelet_certs.results }}" 59 | when: 60 | - inventory_hostname != groups['kube-master'][0] 61 | - inventory_hostname in (groups['kube-master'] + groups['kube-worker'] + groups['new-worker']) -------------------------------------------------------------------------------- /roles/prepare/kube-certificates/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create kubernetes certificate directory 2 | file: 3 | name: "{{ item }}" 4 | state: directory 5 | with_items: 6 | - /etc/kubernetes/pki 7 | - /var/lib/kubelet/pki 8 | 9 | - block: 10 | # 获取密钥状态 11 | - include_tasks: certs_stat.yml 12 | # 生成公共证书 13 | - include_tasks: common.yml 14 | when: inventory_hostname == groups['kube-master'][0] 15 | 16 | # 分发证书 17 | - include_tasks: distribute.yml 18 | 19 | - name: Read kubelet.conf file stat info 20 | stat: 21 | path: /etc/kubernetes/kubelet.conf 22 | register: kubelet_conf_stat 23 | 24 | - name: Configuration kubeconfig 25 | include_role: 26 | name: kube-config 27 | tasks_from: kubeconfig 28 | when: kubelet_conf_stat.stat.exists -------------------------------------------------------------------------------- /roles/prepare/kube-certificates/templates/kube-openssl.cnf.j2: 
-------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | default_md = sha256 4 | distinguished_name = req_distinguished_name 5 | 6 | [req_distinguished_name] 7 | 8 | [ v3_ca ] 9 | basicConstraints = critical, CA:TRUE 10 | keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign 11 | 12 | [ v3_req_server ] 13 | basicConstraints = CA:FALSE 14 | keyUsage = critical, digitalSignature, keyEncipherment 15 | extendedKeyUsage = serverAuth 16 | subjectAltName = @alt_kube_apiserver 17 | 18 | [ v3_req_kubelet ] 19 | basicConstraints = CA:FALSE 20 | keyUsage = critical, digitalSignature, keyEncipherment 21 | extendedKeyUsage = serverAuth 22 | subjectAltName = @alt_kubelet 23 | 24 | [ v3_req_client ] 25 | basicConstraints = CA:FALSE 26 | keyUsage = critical, digitalSignature, keyEncipherment 27 | extendedKeyUsage = clientAuth 28 | 29 | [ alt_kube_apiserver ] 30 | DNS.1 = localhost 31 | DNS.2 = kubernetes 32 | DNS.3 = kubernetes.default 33 | DNS.4 = kubernetes.default.svc 34 | {% set dns_idx = 4 | int %} 35 | {% for sub_domain in kube_dns_domain.split('.') %} 36 | {% set outer_loop = loop %} 37 | DNS.{{ dns_idx + loop.index }} = kubernetes.default.svc.{% for domain in kube_dns_domain.split('.') %}{% if loop.index <= outer_loop.index %}{{ domain }}{% if loop.index < outer_loop.index %}.{% endif %}{% endif %}{% endfor %} 38 | 39 | {% endfor %} 40 | {% set dns_idx = 4 + (kube_dns_domain.split('.')|length) | int %} 41 | {% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %} 42 | {% set dns_idx = 4 + kube_dns_domain.split('.')|length | int %} 43 | {% for host in (groups['kube-master'] | default([])) | unique %} 44 | DNS.{{ dns_idx + loop.index }} = {{ host }} 45 | {% endfor %} 46 | {% endif %} 47 | IP.1 = 127.0.0.1 48 | IP.2 = 0:0:0:0:0:0:0:1 49 | IP.3 = {{ KUBERNETES_SERVICE_IP }} 50 | {% set ip_idx = 3 | int %} 51 | {% for host in (groups['kube-master'] | default([])) | unique %} 52 | IP.{{ ip_idx + loop.index }} = {% if hostvars[host]['ansible_ssh_host'] is defined %}{{ hostvars[host]['ansible_ssh_host'] }}{% else %}{{ host }}{% endif %} 53 | 54 | {% endfor %} 55 | {% set ip_idx = 3 + groups['kube-master']|length | int %} 56 | {% if lb_kube_apiserver_ip is defined %} 57 | IP.{{4 + groups['kube-master']|length | int }} = {{ lb_kube_apiserver_ip | trim }} 58 | {% endif %} 59 | 60 | [ alt_kubelet ] 61 | DNS.1 = localhost 62 | {% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %} 63 | {% set dns_idx = 1 | int %} 64 | {% for host in (groups['kube-master'] + groups['kube-worker'] + groups['new-worker'] | default([])) | unique %} 65 | DNS.{{ dns_idx + loop.index }} = {{ host }} 66 | {% endfor %} 67 | {% endif %} 68 | IP.1 = 127.0.0.1 69 | IP.2 = 0:0:0:0:0:0:0:1 70 | {% set ip_idx = 2 | int %} 71 | {% for host in (groups['kube-master'] + groups['kube-worker'] + groups['new-worker'] | default([])) | unique %} 72 | IP.{{ ip_idx + loop.index }} = {% if hostvars[host]['ansible_ssh_host'] is defined %}{{ hostvars[host]['ansible_ssh_host'] }}{% else %}{{ host }}{% endif %} 73 | 74 | {% endfor %} -------------------------------------------------------------------------------- /roles/prepare/kubernetes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true 3 | # 当前节点ip 4 | CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ 
inventory_hostname }}{% endif %}" 5 | -------------------------------------------------------------------------------- /roles/prepare/kubernetes/files/kubernetes.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/prepare/kubernetes/files/kubernetes.gpg -------------------------------------------------------------------------------- /roles/prepare/kubernetes/templates/10-kubeadm.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" 3 | Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" 4 | EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env 5 | 6 | {% if ansible_distribution in [ "CentOS","RedHat","EulerOS","openEuler",'Kylin Linux Advanced Server' ] %} 7 | EnvironmentFile=-/etc/sysconfig/kubelet 8 | {% endif %} 9 | {% if ansible_distribution in [ 'Ubuntu','Debian','Kylin' ] %} 10 | EnvironmentFile=/etc/default/kubelet 11 | {% endif %} 12 | ExecStart= 13 | ExecStart={{ bin_dir }}/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS 14 | -------------------------------------------------------------------------------- /roles/prepare/kubernetes/templates/kubelet.config.j2: -------------------------------------------------------------------------------- 1 | {% if container_runtime == "docker" %} 2 | KUBELET_EXTRA_ARGS=--node-ip={{ CURRENT_HOST_IP }} --cgroup-driver={{ cgroup_driver }} 3 | {% endif %} 4 | {% if container_runtime == "containerd" %} 5 | KUBELET_EXTRA_ARGS=--node-ip={{ CURRENT_HOST_IP }} --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock 6 | {% endif %} -------------------------------------------------------------------------------- /roles/prepare/kubernetes/templates/kubelet.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubelet: The Kubernetes Node Agent 3 | Documentation=https://kubernetes.io/docs/ 4 | 5 | [Service] 6 | ExecStartPre=/usr/sbin/swapoff -a 7 | ExecStart={{ bin_dir }}/kubelet 8 | Restart=always 9 | StartLimitInterval=0 10 | RestartSec=10 11 | 12 | [Install] 13 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/prepare/nameserver/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Insert repository record 2 | lineinfile: 3 | path: "/etc/hosts" 4 | regexp: '{{ registry_hostname }}' 5 | line: "{{registry_hostname}} {{ dns_repository_hostname }}" 6 | 7 | - name: Insert hosts record 8 | lineinfile: 9 | path: "/etc/hosts" 10 | regexp: "{{ item }}" 11 | line: "{{ hostvars[item]['ansible_ssh_host'] }} {{ item }}" 12 | with_items: 13 | - "{{ groups['kube-master'] }}" 14 | - "{{ groups['kube-worker'] }}" 15 | - "{{ groups['new-worker'] }}" 16 | when: cluster_name_style == "hostname" -------------------------------------------------------------------------------- /roles/prepare/repository/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | # ubuntu apt版本 2 | ubuntu_apt_url: "{% if architectures == 'amd64' %}apt-proxy{% else %}apt-proxy-arm64{% endif %}" 3 | ubuntu_version: "{% if 
ansible_distribution_major_version == '20' %}focal{% elif ansible_distribution_major_version == '18' %}bionic{% elif ansible_distribution_major_version == '16' %}xenial{% endif %}" 4 | -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-euler-amd64.j2: -------------------------------------------------------------------------------- 1 | [EulerOS-Base] 2 | name=EulerOS-2.0SP5 base 3 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/euler-base/2.5/os/$basearch/ 4 | enabled=1 5 | gpgcheck=1 6 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/euler-base/2.5/os/RPM-GPG-KEY-EulerOS 7 | protect=1 8 | 9 | [Centos-epel] 10 | name=CentOS epel 11 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-epel/7/$basearch/ 12 | enabled=1 13 | gpgcheck=0 14 | protect=1 15 | -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-euler-arm64.j2: -------------------------------------------------------------------------------- 1 | [EulerOS-Base] 2 | name=EulerOS-2.0SP8 base 3 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/euler-base/2.8/os/$basearch/ 4 | enabled=1 5 | gpgcheck=1 6 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/euler-base/2.8/os/RPM-GPG-KEY-EulerOS 7 | protect=1 8 | 9 | [Centos-epel] 10 | name=CentOS epel 11 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-epel/7/$basearch/ 12 | enabled=1 13 | gpgcheck=0 14 | protect=1 15 | -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-kylin-amd64-deb.j2: -------------------------------------------------------------------------------- 1 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-base/KYLIN-ALL 10.1 main restricted universe multiverse 2 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-base/KYLIN-ALL 10.1-hwe main 3 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-deb/kylin/production/PART-V10-SP1/custom/partner/V10-SP1 default all -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-kylin-amd64-rpm.j2: -------------------------------------------------------------------------------- 1 | ###Kylin Linux Advanced Server 10 - os repo### 2 | 3 | [ks10-adv-os] 4 | name = Kylin Linux Advanced Server 10 - Os 5 | baseurl = {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-rpm/V10/V10SP2/os/adv/lic/base/$basearch/ 6 | gpgcheck = 1 7 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-kylin 8 | enabled = 1 9 | 10 | [ks10-adv-updates] 11 | name = Kylin Linux Advanced Server 10 - Updates 12 | baseurl = {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-rpm/V10/V10SP2/os/adv/lic/updates/$basearch/ 13 | gpgcheck = 1 14 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-kylin 15 | enabled = 1 16 | 17 | [ks10-adv-addons] 18 | name = Kylin Linux Advanced Server 10 - Addons 19 | baseurl = {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-rpm/V10/V10SP2/os/adv/lic/addons/$basearch/ 20 | gpgcheck = 1 21 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-kylin 22 | 
enabled = 0 -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-kylin-arm64-deb.j2: -------------------------------------------------------------------------------- 1 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-base/KYLIN-ALL 10.0 main universe multiverse restricted 2 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-deb/kylin/production/PART-V10/custom/partner/V10 default all -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-kylin-arm64-rpm.j2: -------------------------------------------------------------------------------- 1 | ###Kylin Linux Advanced Server 10 - os repo### 2 | 3 | [ks10-adv-os] 4 | name = Kylin Linux Advanced Server 10 - Os 5 | baseurl = {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-rpm/V10/V10SP1/os/adv/lic/base/$basearch/ 6 | gpgcheck = 0 7 | enabled = 1 8 | 9 | [ks10-adv-updates] 10 | name = Kylin Linux Advanced Server 10 - Updates 11 | baseurl = {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-rpm/V10/V10SP1/os/adv/lic/updates/$basearch/ 12 | gpgcheck = 0 13 | enabled = 0 14 | 15 | [ks10-adv-addons] 16 | name = Kylin Linux Advanced Server 10 - Addons 17 | baseurl = {{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/kylin-rpm/V10/V10SP1/os/adv/lic/addons/$basearch/ 18 | gpgcheck = 0 19 | enabled = 0 -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-linux-amd64.j2: -------------------------------------------------------------------------------- 1 | [Centos-Base] 2 | name=CentOS Base 3 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-base/7/extras/$basearch/ 4 | enabled=1 5 | gpgcheck=0 6 | protect=1 7 | 8 | 9 | [Centos-Extras] 10 | name=CentOS Extras 11 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-base/7/os/$basearch/ 12 | enabled=1 13 | gpgcheck=0 14 | protect=1 15 | 16 | [Centos-epel] 17 | name=CentOS epel 18 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-epel/7/$basearch/ 19 | enabled=1 20 | gpgcheck=0 21 | protect=1 22 | -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-linux-arm64.j2: -------------------------------------------------------------------------------- 1 | [Centos-Base] 2 | name=CentOS Base 3 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-altarch/7/os/$basearch/ 4 | enabled=1 5 | gpgcheck=0 6 | protect=1 7 | 8 | [Centos-Extras] 9 | name=CentOS Extras 10 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-altarch/7/extras/$basearch/ 11 | enabled=1 12 | gpgcheck=0 13 | protect=1 14 | 15 | [Centos-epel] 16 | name=CentOS epel 17 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/centos-epel/7/$basearch/ 18 | enabled=1 19 | gpgcheck=0 20 | protect=1 21 | -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-openeuler.j2: -------------------------------------------------------------------------------- 1 | [OS] 2 | name=OS 3 | baseurl={{ registry_protocol 
}}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/OS/$basearch/ 4 | enabled=1 5 | gpgcheck=1 6 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/OS/$basearch/RPM-GPG-KEY-openEuler 7 | 8 | [everything] 9 | name=everything 10 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/everything/$basearch/ 11 | enabled=1 12 | gpgcheck=1 13 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/everything/$basearch/RPM-GPG-KEY-openEuler 14 | 15 | [EPOL] 16 | name=EPOL 17 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/EPOL/main/$basearch/ 18 | enabled=1 19 | gpgcheck=1 20 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/OS/$basearch/RPM-GPG-KEY-openEuler 21 | 22 | [debuginfo] 23 | name=debuginfo 24 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/debuginfo/$basearch/ 25 | enabled=1 26 | gpgcheck=1 27 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/debuginfo/$basearch/RPM-GPG-KEY-openEuler 28 | 29 | [source] 30 | name=source 31 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/source/ 32 | enabled=1 33 | gpgcheck=1 34 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/source/RPM-GPG-KEY-openEuler 35 | 36 | [update] 37 | name=update 38 | baseurl={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/update/$basearch/ 39 | enabled=1 40 | gpgcheck=1 41 | gpgkey={{ registry_protocol }}://{{ registry_hostname }}:{{repo_port}}/repository/openEuler-base/openEuler-22.03-LTS/OS/$basearch/RPM-GPG-KEY-openEuler -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-ubuntu-amd64.j2: -------------------------------------------------------------------------------- 1 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }} main restricted universe multiverse 2 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-security main restricted universe multiverse 3 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates main restricted universe multiverse 4 | # deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-proposed main restricted universe multiverse 5 | # deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-backports main restricted universe multiverse 6 | deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ 
ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }} main restricted universe multiverse 7 | deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-security main restricted universe multiverse 8 | deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates main restricted universe multiverse 9 | # deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-proposed main restricted universe multiverse 10 | # deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-backports main restricted universe multiverse -------------------------------------------------------------------------------- /roles/prepare/repository/templates/kubeops.repo-ubuntu-arm64.j2: -------------------------------------------------------------------------------- 1 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }} main restricted universe multiverse 2 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-security main restricted universe multiverse 3 | deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates main restricted universe multiverse 4 | # deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-proposed main restricted universe multiverse 5 | # deb {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-backports main restricted universe multiverse 6 | deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }} main restricted universe multiverse 7 | deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-security main restricted universe multiverse 8 | deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-updates main restricted universe multiverse 9 | # deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-proposed main restricted universe multiverse 10 | # deb-src {{ registry_protocol }}://{{ registry_hostname }}:{{ repo_port }}/repository/{{ ubuntu_apt_url }}-{{ ansible_distribution_release }} {{ ansible_distribution_release }}-backports main restricted universe multiverse 
-------------------------------------------------------------------------------- /roles/remove/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Privilege escalation 2 | ansible_become: true 3 | 4 | drain_grace_period: 300 5 | drain_timeout: 360s -------------------------------------------------------------------------------- /roles/remove/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "Cordon worker node:{{ inventory_hostname }}" 2 | shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}" 3 | ignore_errors: true 4 | delegate_to: "{{ groups['kube-master'][0] }}" 5 | 6 | - name: "Drain worker node: evict Pods running on {{ inventory_hostname }}" 7 | shell: > 8 | {{ bin_dir }}/kubectl drain 9 | --force 10 | --ignore-daemonsets 11 | --grace-period {{ drain_grace_period }} 12 | --timeout {{ drain_timeout }} 13 | --delete-local-data {{ inventory_hostname }} 14 | ignore_errors: true 15 | delegate_to: "{{ groups['kube-master'][0] }}" 16 | 17 | - name: Remove the worker role label from the node 18 | shell: > 19 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/worker='' --overwrite && 20 | {{ bin_dir }}/kubectl label node {{ inventory_hostname }} node-role.kubernetes.io/worker- 21 | ignore_errors: true 22 | delegate_to: "{{ groups['kube-master'][0] }}" 23 | 24 | - name: "Remove node:{{ inventory_hostname }}" 25 | shell: "{{ bin_dir }}/kubectl delete node {{ inventory_hostname }}" 26 | ignore_errors: true 27 | delegate_to: "{{ groups['kube-master'][0] }}" -------------------------------------------------------------------------------- /roles/reset/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Privilege escalation 2 | ansible_become: true -------------------------------------------------------------------------------- /roles/restore/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Privilege escalation 2 | ansible_become: true 3 | # Current node IP 4 | CURRENT_HOST_IP: "{% if hostvars[inventory_hostname]['ansible_ssh_host'] is defined %}{{ hostvars[inventory_hostname]['ansible_ssh_host'] }}{% else %}{{ inventory_hostname }}{% endif %}" 5 | # etcd cluster 6 | INITIAL_CLUSTER: "{% for host in (groups['etcd']|unique) %}{% if hostvars[host]['ansible_ssh_host'] is defined %}etcd-{{ host }}=https://{{ hostvars[host]['ansible_ssh_host'] }}:2380{% else %}etcd-{{ host }}=https://{{ host }}:2380{% endif %}{% if not loop.last %},{% endif %}{% endfor %}" -------------------------------------------------------------------------------- /roles/restore/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - block: 2 | - name: Read etcd backup file 3 | find: 4 | paths: "{{ ('/var/ko/data/backup/' + cluster_name) | realpath }}" 5 | patterns: "{{ etcd_snapshot_name }}" 6 | register: etcd_back_paths 7 | 8 | - name: Verify the backup file exists 9 | assert: 10 | that: etcd_back_paths.files|length >= 1 11 | msg: "The etcd backup file was not found." 12 | delegate_to: localhost 13 | 14 | - name: Confirm kubelet has stopped running 15 | service: 16 | name: kubelet 17 | state: stopped 18 | 19 | - name: Confirm etcd has stopped running 20 | service: 21 | name: etcd 22 | state: stopped 23 | 24 | - name: Confirm etcd data related directories exist 25 | file: 26 | name: "{{ item }}" 27 | state: directory 28 | with_items: 29 | - /etc/kubernetes/backup/etcd 30 | 31 | - name: Copy etcd backup file to all
etcd node 32 | copy: 33 | src: "/var/ko/data/backup/{{ cluster_name }}/{{ etcd_snapshot_name }}" 34 | dest: "/etc/kubernetes/backup/etcd/{{ etcd_snapshot_name }}" 35 | 36 | - name: Confirm backup directory exist 37 | file: 38 | name: "/var/ko/data/backup/k8s" 39 | state: directory 40 | 41 | - name: Clean up the last backup and restore data 42 | file: 43 | name: "/var/ko/data/backup/k8s/etcd" 44 | state: absent 45 | 46 | - name: Restore the data to the etcd data directory 47 | shell: "cp -rf {{ etcd_data_dir }} /var/ko/data/backup/k8s/" 48 | 49 | - name: Clean up etcd data directory 50 | file: 51 | name: "{{ etcd_data_dir }}" 52 | state: absent 53 | 54 | - name: Restore etcd data 55 | shell: "cd /etc/kubernetes/backup/etcd && \ 56 | ETCDCTL_API=3 {{ bin_dir }}/etcdctl snapshot restore \ 57 | {{ etcd_snapshot_name }} \ 58 | --data-dir={{ etcd_data_dir }} \ 59 | --name etcd-{{ inventory_hostname }} \ 60 | --initial-cluster {{ INITIAL_CLUSTER }} \ 61 | --initial-cluster-token etcd-cluster-token \ 62 | --initial-advertise-peer-urls https://{{ CURRENT_HOST_IP }}:2380" 63 | 64 | - name: Authorization etcd data directory 65 | file: 66 | path: "{{ etcd_data_dir }}" 67 | mode: '0755' 68 | 69 | - name: Remove temporary etcd backup files from each node 70 | file: 71 | name: "/etc/kubernetes/backup/etcd/{{ etcd_snapshot_name }}" 72 | state: absent 73 | 74 | - name: Systemctl daemon-reload 75 | systemd: 76 | daemon_reload: yes 77 | 78 | - name: Restart etcd service 79 | service: 80 | name: etcd 81 | state: restarted 82 | enabled: yes 83 | 84 | - name: Start kubelet service 85 | service: 86 | name: kubelet 87 | state: restarted 88 | enabled: yes -------------------------------------------------------------------------------- /roles/upgrade/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # 提权操作 2 | ansible_become: true -------------------------------------------------------------------------------- /roles/upgrade/files/kubernetes.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KubeOperator/ansible/144eac851c91623619bea7962933d9de6e7bafe5/roles/upgrade/files/kubernetes.gpg -------------------------------------------------------------------------------- /roles/upgrade/tasks/centos.yml: -------------------------------------------------------------------------------- 1 | - name: Download kubeadm kubelet kubectl file 2 | get_url: 3 | validate_certs: no 4 | url: "{{ k8s_upgrade_download_url }}" 5 | dest: "{{ base_dir }}/" 6 | timeout: "{{ download_timeout_online }}" 7 | 8 | - name: Unarchive kubeadm kubelet kubectl file 9 | unarchive: 10 | src: "{{ base_dir }}/k8s.tar.gz" 11 | dest: "{{ base_dir }}/" 12 | remote_src: yes 13 | 14 | - name: Copy kubeadm kubelet kubectl file 15 | copy: 16 | src: "{{ base_dir}}/k8s/{{ item }}" 17 | dest: "{{ bin_dir }}/" 18 | remote_src: yes 19 | mode: "0755" 20 | with_items: 21 | - kubeadm 22 | - kubelet 23 | - kubectl 24 | 25 | - block: 26 | - name: Download kube-master image 27 | get_url: 28 | validate_certs: no 29 | url: "{{ item }}" 30 | dest: "{{ base_dir }}/k8s" 31 | timeout: "{{ download_timeout_online }}" 32 | with_items: 33 | - "{{ kube_controller_manager_upgrade_download_url }}" 34 | - "{{ kube_apiserver_upgrade_download_url }}" 35 | - "{{ kube_scheduler_upgrade_download_url }}" 36 | 37 | - name: Docker | Load kube-master image 38 | shell: "{{ bin_dir }}/docker load -i {{ base_dir }}/k8s/{{ item }}" 39 | with_items: 40 | - 
kube-controller-manager.tar 41 | - kube-apiserver.tar 42 | - kube-scheduler.tar 43 | when: container_runtime == 'docker' 44 | 45 | - name: Containerd | Load kube-master image 46 | shell: "{{ bin_dir }}/ctr -n=k8s.io images import {{ base_dir }}/k8s/{{ item }}" 47 | with_items: 48 | - kube-controller-manager.tar 49 | - kube-apiserver.tar 50 | - kube-scheduler.tar 51 | when: container_runtime == 'containerd' 52 | when: inventory_hostname in groups['kube-master'] 53 | 54 | - block: 55 | - name: Download kube-proxy and pause image 56 | get_url: 57 | validate_certs: no 58 | url: "{{ item }}" 59 | dest: "{{ base_dir }}/k8s" 60 | timeout: "{{ download_timeout_online }}" 61 | with_items: 62 | - "{{ kube_proxy_upgrade_download_url }}" 63 | - "{{ pause_upgrade_download_url }}" 64 | 65 | - block: 66 | - name: Docker | Load kube-proxy image 67 | shell: "{{ bin_dir }}/docker load -i {{ base_dir }}/k8s/{{ item }}" 68 | with_items: kube-proxy.tar 69 | 70 | - name: Docker | Load pause image 71 | shell: "{{ bin_dir }}/docker load -i {{ base_dir }}/k8s/{{ item }}" 72 | with_items: pause.tar 73 | when: container_runtime == 'docker' 74 | 75 | - block: 76 | - name: Containerd | Load kube-proxy image 77 | shell: "{{ bin_dir }}/ctr -n=k8s.io images import {{ base_dir }}/k8s/{{ item }}" 78 | with_items: kube-proxy.tar 79 | 80 | - name: Containerd | Load pause image 81 | shell: "{{ bin_dir }}/ctr -n=k8s.io images import {{ base_dir }}/k8s/{{ item }}" 82 | with_items: pause.tar 83 | when: container_runtime == 'containerd' 84 | when: inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 85 | 86 | - include_tasks: common.yml 87 | when: inventory_hostname in (groups['kube-master'] + groups['kube-worker'] + groups['new-worker']) -------------------------------------------------------------------------------- /roles/upgrade/tasks/common.yml: -------------------------------------------------------------------------------- 1 | - name: Confirm kubeadm version 2 | command: "{{ bin_dir }}/kubeadm version -o short" 3 | register: kubeadm_version_output 4 | 5 | - name: Setup kubeadm api version to v1beta1 6 | set_fact: 7 | kubeadmConfig_api_version: v1beta1 8 | when: 9 | - kubeadm_version_output.stdout is version('v1.13.0', '>=') 10 | - kubeadm_version_output.stdout is version('v1.15.0', '<') 11 | 12 | - name: Setup kubeadm api version to v1beta2 13 | set_fact: 14 | kubeadmConfig_api_version: v1beta2 15 | when: kubeadm_version_output.stdout is version('v1.15.0', '>=') 16 | 17 | - name: Remove unused parameters iptables.max 18 | lineinfile: 19 | path: /etc/kubernetes/kubeadm-config.yaml 20 | regexp: 'max:' 21 | state: absent 22 | 23 | - name: Remove unused parameters resourceContainer 24 | lineinfile: 25 | path: /etc/kubernetes/kubeadm-config.yaml 26 | regexp: 'resourceContainer:' 27 | state: absent 28 | 29 | - name: Upgrade kubeadm config version 30 | lineinfile: 31 | path: /etc/kubernetes/kubeadm-config.yaml 32 | regexp: '^kubernetesVersion' 33 | line: "kubernetesVersion: {{ kube_upgrade_version }}" 34 | 35 | - name: "Migration kubeadm config to {{ kube_upgrade_version }} version" 36 | shell: > 37 | {{ bin_dir }}/kubeadm config migrate 38 | --old-config=/etc/kubernetes/kubeadm-config.yaml 39 | --new-config=/etc/kubernetes/kubeadm-config.yaml 40 | when: kubeadmConfig_api_version != "v1beta2" 41 | 42 | - name: "Upgrade the first master node: {{ inventory_hostname }} to {{ kube_upgrade_version }}" 43 | shell: > 44 | {{ bin_dir }}/kubeadm upgrade apply --config=/etc/kubernetes/kubeadm-config.yaml --force 45 | 
--ignore-preflight-errors=CoreDNSUnsupportedPlugins,CoreDNSMigration 46 | when: inventory_hostname == groups['kube-master'][0] 47 | 48 | - name: "Upgrade the remaining master nodes: {{ inventory_hostname }} to {{ kube_upgrade_version }}" 49 | shell: > 50 | {{ bin_dir }}/kubeadm upgrade node 51 | {% if kube_upgrade_version.split('.')[1]|int == 14 %} 52 | experimental-control-plane 53 | {% endif %} 54 | when: 55 | - inventory_hostname != groups['kube-master'][0] 56 | - inventory_hostname in groups['kube-master'] 57 | 58 | - name: "Upgrade worker node: {{ inventory_hostname }} to {{ kube_upgrade_version }}" 59 | shell: > 60 | {{ bin_dir }}/kubeadm upgrade node 61 | {% if kube_upgrade_version.split('.')[1]|int == 14 %} 62 | config --kubelet-version {{ kube_upgrade_version }} 63 | {% endif %} 64 | when: 65 | - inventory_hostname in (groups['kube-worker'] + groups['new-worker']) 66 | - inventory_hostname not in groups['kube-master'] -------------------------------------------------------------------------------- /roles/upgrade/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: centos.yml 2 | 3 | - name: Configure kubelet 4 | lineinfile: 5 | path: /var/lib/kubelet/config.yaml 6 | regexp: 'resolvConf:' 7 | line: "resolvConf: {% if ansible_distribution == 'Ubuntu' %} /run/systemd/resolve/resolv.conf {% else %} /etc/resolv.conf {% endif %}" 8 | 9 | - name: Systemctl daemon-reload 10 | systemd: 11 | daemon_reload: yes 12 | 13 | - name: Restart kubelet service 14 | service: 15 | name: kubelet 16 | state: restarted 17 | enabled: yes 18 | 19 | - name: Update kubectl command line auto-completion 20 | shell: "kubectl completion bash > /usr/share/bash-completion/completions/kubectl" 21 | ignore_errors: true --------------------------------------------------------------------------------
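A minimal post-upgrade verification sketch, not part of this repository: it only illustrates how the result of roles/upgrade could be checked. It reuses the bin_dir, kube_upgrade_version, and kube-master conventions from the roles above, and assumes kube_upgrade_version carries the same v-prefixed form (for example v1.18.6) that nodes report in status.nodeInfo.kubeletVersion.

# Hypothetical tasks, not shipped with these roles.
- name: Collect kubelet versions reported by every node
  shell: "{{ bin_dir }}/kubectl get nodes -o jsonpath='{.items[*].status.nodeInfo.kubeletVersion}'"
  register: kubelet_versions
  run_once: true
  delegate_to: "{{ groups['kube-master'][0] }}"

- name: Assert that all nodes report the target version
  assert:
    that:
      - kubelet_versions.stdout.split() | unique == [kube_upgrade_version]
    msg: "Some nodes still report a different version: {{ kubelet_versions.stdout }}"
  run_once: true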