├── .gitmodules
├── .nojekyll
├── CNAME
├── extra_playbooks
├── roles
├── inventory
├── wait-for-cloud-init.yml
└── files
│ └── get_cinder_pvs.sh
├── requirements.txt
├── roles
├── kubernetes-apps
│ ├── helm
│ │ ├── .gitkeep
│ │ └── defaults
│ │ │ └── main.yml
│ ├── csi_driver
│ │ ├── gcp_pd
│ │ │ ├── defaults
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── gcp-pd-csi-cred-secret.yml.j2
│ │ ├── OWNERS
│ │ ├── aws_ebs
│ │ │ ├── templates
│ │ │ │ └── aws-ebs-csi-driver.yml.j2
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── vsphere
│ │ │ └── templates
│ │ │ │ ├── vsphere-csi-driver.yml.j2
│ │ │ │ ├── vsphere-csi-cloud-config.j2
│ │ │ │ └── vsphere-csi-controller-service.yml.j2
│ │ ├── azuredisk
│ │ │ ├── defaults
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ ├── azure-csi-cloud-config-secret.yml.j2
│ │ │ │ └── azure-csi-azuredisk-driver.yml.j2
│ │ ├── upcloud
│ │ │ ├── templates
│ │ │ │ ├── upcloud-csi-driver.yml.j2
│ │ │ │ └── upcloud-csi-cred-secret.yml.j2
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ └── cinder
│ │ │ ├── templates
│ │ │ ├── cinder-csi-driver.yml.j2
│ │ │ ├── cinder-csi-cloud-config-secret.yml.j2
│ │ │ └── cinder-csi-poddisruptionbudget.yml.j2
│ │ │ └── tasks
│ │ │ └── cinder-write-cacert.yml
│ ├── network_plugin
│ │ ├── calico
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── kube-router
│ │ │ └── OWNERS
│ │ ├── kube-ovn
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ └── canal
│ │ │ └── tasks
│ │ │ └── main.yml
│ ├── cluster_roles
│ │ ├── templates
│ │ │ └── namespace.j2
│ │ └── files
│ │ │ └── k8s-cluster-critical-pc.yml
│ ├── metallb
│ │ └── OWNERS
│ ├── snapshots
│ │ ├── snapshot-controller
│ │ │ ├── defaults
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── snapshot-ns.yml.j2
│ │ ├── cinder-csi
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── argocd
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── argocd-namespace.yml.j2
│ ├── persistent_volumes
│ │ ├── aws-ebs-csi
│ │ │ ├── OWNERS
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── azuredisk-csi
│ │ │ ├── defaults
│ │ │ │ └── main.yml
│ │ │ └── templates
│ │ │ │ └── azure-csi-storage-class.yml.j2
│ │ ├── upcloud-csi
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── openstack
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── cinder-csi
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ └── gcp-pd-csi
│ │ │ └── defaults
│ │ │ └── main.yml
│ ├── registry
│ │ └── templates
│ │ │ ├── registry-sa.yml.j2
│ │ │ ├── registry-ns.yml.j2
│ │ │ ├── registry-secrets.yml.j2
│ │ │ ├── registry-cm.yml.j2
│ │ │ ├── registry-cr.yml.j2
│ │ │ ├── registry-crb.yml.j2
│ │ │ └── registry-pvc.yml.j2
│ ├── container_runtimes
│ │ ├── crun
│ │ │ └── templates
│ │ │ │ └── runtimeclass-crun.yml
│ │ ├── youki
│ │ │ └── templates
│ │ │ │ └── runtimeclass-youki.yml
│ │ ├── gvisor
│ │ │ └── templates
│ │ │ │ └── runtimeclass-gvisor.yml.j2
│ │ └── kata_containers
│ │ │ ├── defaults
│ │ │ └── main.yaml
│ │ │ └── templates
│ │ │ └── runtimeclass-kata-qemu.yml.j2
│ ├── external_cloud_controller
│ │ ├── openstack
│ │ │ └── OWNERS
│ │ ├── hcloud
│ │ │ └── templates
│ │ │ │ ├── external-hcloud-cloud-service-account.yml.j2
│ │ │ │ ├── external-hcloud-cloud-secret.yml.j2
│ │ │ │ └── external-hcloud-cloud-role-bindings.yml.j2
│ │ └── vsphere
│ │ │ └── templates
│ │ │ ├── external-vsphere-cpi-cloud-config.j2
│ │ │ └── external-vsphere-cpi-cloud-config-secret.yml.j2
│ ├── krew
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── krew.j2
│ │ └── tasks
│ │ │ └── main.yml
│ ├── ansible
│ │ └── templates
│ │ │ ├── netchecker-agent-sa.yml.j2
│ │ │ ├── netchecker-server-sa.yml.j2
│ │ │ ├── netchecker-ns.yml.j2
│ │ │ ├── coredns-sa.yml.j2
│ │ │ ├── nodelocaldns-sa.yml.j2
│ │ │ ├── netchecker-server-clusterrole.yml.j2
│ │ │ ├── netchecker-server-svc.yml.j2
│ │ │ ├── etcd_metrics-service.yml.j2
│ │ │ ├── netchecker-agent-hostnet-clusterrole.yml.j2
│ │ │ ├── netchecker-server-clusterrolebinding.yml.j2
│ │ │ └── netchecker-agent-hostnet-clusterrolebinding.yml.j2
│ ├── container_engine_accelerator
│ │ ├── nvidia_gpu
│ │ │ └── vars
│ │ │ │ ├── centos-7.yml
│ │ │ │ ├── ubuntu-16.yml
│ │ │ │ └── ubuntu-18.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── ingress_controller
│ │ ├── alb_ingress_controller
│ │ │ ├── OWNERS
│ │ │ ├── templates
│ │ │ │ ├── alb-ingress-sa.yml.j2
│ │ │ │ ├── alb-ingress-ns.yml.j2
│ │ │ │ └── alb-ingress-clusterrolebinding.yml.j2
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── ingress_nginx
│ │ │ └── templates
│ │ │ │ ├── 00-namespace.yml.j2
│ │ │ │ ├── sa-ingress-nginx.yml.j2
│ │ │ │ ├── cm-ingress-nginx.yml.j2
│ │ │ │ ├── cm-tcp-services.yml.j2
│ │ │ │ └── cm-udp-services.yml.j2
│ │ └── cert_manager
│ │ │ └── defaults
│ │ │ └── main.yml
│ ├── policy_controller
│ │ ├── calico
│ │ │ ├── templates
│ │ │ │ ├── calico-kube-sa.yml.j2
│ │ │ │ └── calico-kube-crb.yml.j2
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── external_provisioner
│ │ ├── local_path_provisioner
│ │ │ ├── templates
│ │ │ │ ├── local-path-storage-ns.yml.j2
│ │ │ │ ├── local-path-storage-sa.yml.j2
│ │ │ │ ├── local-path-storage-psp-cr.yml.j2
│ │ │ │ └── local-path-storage-sc.yml.j2
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── rbd_provisioner
│ │ │ └── templates
│ │ │ │ ├── sa-rbd-provisioner.yml.j2
│ │ │ │ ├── 00-namespace.yml.j2
│ │ │ │ ├── clusterrolebinding-rbd-provisioner.yml.j2
│ │ │ │ ├── role-rbd-provisioner.yml.j2
│ │ │ │ └── rolebinding-rbd-provisioner.yml.j2
│ │ ├── cephfs_provisioner
│ │ │ ├── templates
│ │ │ │ ├── sa-cephfs-provisioner.yml.j2
│ │ │ │ ├── 00-namespace.yml.j2
│ │ │ │ ├── secret-cephfs-provisioner.yml.j2
│ │ │ │ ├── clusterrolebinding-cephfs-provisioner.yml.j2
│ │ │ │ ├── role-cephfs-provisioner.yml.j2
│ │ │ │ └── rolebinding-cephfs-provisioner.yml.j2
│ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ └── local_volume_provisioner
│ │ │ └── templates
│ │ │ ├── local-volume-provisioner-sa.yml.j2
│ │ │ ├── local-volume-provisioner-ns.yml.j2
│ │ │ ├── local-volume-provisioner-clusterrole.yml.j2
│ │ │ ├── local-volume-provisioner-psp-cr.yml.j2
│ │ │ └── local-volume-provisioner-psp-role.yml.j2
│ ├── metrics_server
│ │ ├── templates
│ │ │ ├── metrics-server-sa.yaml.j2
│ │ │ ├── resource-reader.yaml.j2
│ │ │ ├── auth-delegator.yaml.j2
│ │ │ ├── metrics-server-service.yaml.j2
│ │ │ ├── resource-reader-clusterrolebinding.yaml.j2
│ │ │ ├── auth-reader.yaml.j2
│ │ │ └── metrics-apiservice.yaml.j2
│ │ └── defaults
│ │ │ └── main.yml
│ └── cloud_controller
│ │ └── oci
│ │ └── defaults
│ │ └── main.yml
├── kubernetes
│ ├── secrets
│ │ ├── files
│ │ │ └── certs
│ │ │ │ └── .gitkeep
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── handlers
│ │ │ └── main.yml
│ ├── node
│ │ ├── vars
│ │ │ ├── fedora.yml
│ │ │ ├── ubuntu-18.yml
│ │ │ ├── ubuntu-20.yml
│ │ │ └── ubuntu-22.yml
│ │ ├── templates
│ │ │ ├── cloud-configs
│ │ │ │ └── gce-cloud-config.j2
│ │ │ └── http-proxy.conf.j2
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── loadbalancer
│ │ │ │ └── kube-vip.yml
│ │ └── handlers
│ │ │ └── main.yml
│ ├── preinstall
│ │ ├── files
│ │ │ └── dhclient_nodnsupdate
│ │ ├── vars
│ │ │ ├── suse.yml
│ │ │ ├── amazon.yml
│ │ │ ├── fedora.yml
│ │ │ ├── ubuntu.yml
│ │ │ ├── debian.yml
│ │ │ ├── debian-11.yml
│ │ │ ├── centos.yml
│ │ │ └── redhat.yml
│ │ ├── templates
│ │ │ ├── ansible_git.j2
│ │ │ └── resolvconf.j2
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── 0061-systemd-resolved.yml
│ ├── control-plane
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── vars
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ ├── k8s-certs-renew.service.j2
│ │ │ ├── k8s-certs-renew.timer.j2
│ │ │ ├── secrets_encryption.yaml.j2
│ │ │ ├── admission-controls.v1beta2.yaml.j2
│ │ │ ├── eventratelimit.v1beta2.yaml.j2
│ │ │ └── apiserver-audit-webhook-config.yaml.j2
│ ├── client
│ │ └── defaults
│ │ │ └── main.yml
│ ├── kubeadm
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ └── tokens
│ │ └── tasks
│ │ └── main.yml
├── container-engine
│ ├── cri-o
│ │ ├── vars
│ │ │ ├── rocky-8.yml
│ │ │ ├── almalinux-8.yml
│ │ │ ├── oraclelinux-8.yml
│ │ │ ├── redhat.yml
│ │ │ ├── fedora-36.yml
│ │ │ ├── clearlinux.yml
│ │ │ ├── fedora.yml
│ │ │ ├── centos-7.yml
│ │ │ ├── centos-8.yml
│ │ │ └── amazon.yml
│ │ ├── files
│ │ │ └── mounts.conf
│ │ ├── molecule
│ │ │ └── default
│ │ │ │ ├── prepare.yml
│ │ │ │ └── converge.yml
│ │ ├── templates
│ │ │ ├── http-proxy.conf.j2
│ │ │ ├── config.json.j2
│ │ │ └── unqualified.conf.j2
│ │ └── handlers
│ │ │ └── main.yml
│ ├── crun
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── containerd-common
│ │ └── vars
│ │ │ ├── suse.yml
│ │ │ └── amazon.yml
│ ├── youki
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── molecule
│ │ │ └── default
│ │ │ ├── files
│ │ │ ├── container.json
│ │ │ ├── sandbox.json
│ │ │ └── 10-mynet.conf
│ │ │ └── converge.yml
│ ├── docker
│ │ ├── vars
│ │ │ ├── clearlinux.yml
│ │ │ ├── suse.yml
│ │ │ └── amazon.yml
│ │ ├── templates
│ │ │ ├── docker-orphan-cleanup.conf.j2
│ │ │ ├── http-proxy.conf.j2
│ │ │ ├── docker-dns.conf.j2
│ │ │ ├── fedora_docker.repo.j2
│ │ │ └── rh_docker.repo.j2
│ │ ├── molecule
│ │ │ └── default
│ │ │ │ ├── prepare.yml
│ │ │ │ └── converge.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── docker_plugin.yml
│ ├── runc
│ │ └── defaults
│ │ │ └── main.yml
│ ├── cri-dockerd
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── molecule
│ │ │ └── default
│ │ │ │ ├── files
│ │ │ │ ├── container.json
│ │ │ │ ├── sandbox.json
│ │ │ │ └── 10-mynet.conf
│ │ │ │ └── converge.yml
│ │ └── templates
│ │ │ └── cri-dockerd.socket.j2
│ ├── crictl
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── crictl.yaml.j2
│ │ └── handlers
│ │ │ └── main.yml
│ ├── kata-containers
│ │ ├── OWNERS
│ │ ├── templates
│ │ │ └── containerd-shim-kata-v2.j2
│ │ ├── molecule
│ │ │ └── default
│ │ │ │ ├── files
│ │ │ │ ├── container.json
│ │ │ │ ├── sandbox.json
│ │ │ │ └── 10-mynet.conf
│ │ │ │ └── converge.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── containerd
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── vars
│ │ │ ├── debian.yml
│ │ │ └── ubuntu.yml
│ │ ├── molecule
│ │ │ └── default
│ │ │ │ └── converge.yml
│ │ └── templates
│ │ │ └── http-proxy.conf.j2
│ ├── gvisor
│ │ └── molecule
│ │ │ └── default
│ │ │ ├── files
│ │ │ ├── container.json
│ │ │ ├── sandbox.json
│ │ │ └── 10-mynet.conf
│ │ │ └── converge.yml
│ └── nerdctl
│ │ └── handlers
│ │ └── main.yml
├── download
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ └── extract_file.yml
├── bastion-ssh-config
│ ├── defaults
│ │ └── main.yml
│ └── molecule
│ │ └── default
│ │ └── converge.yml
├── recover_control_plane
│ ├── control-plane
│ │ └── defaults
│ │ │ └── main.yml
│ └── OWNERS
├── reset
│ └── defaults
│ │ └── main.yml
├── network_plugin
│ ├── calico
│ │ ├── vars
│ │ │ ├── debian.yml
│ │ │ ├── centos-9.yml
│ │ │ ├── fedora.yml
│ │ │ ├── opensuse.yml
│ │ │ ├── redhat-9.yml
│ │ │ ├── redhat.yml
│ │ │ └── amazon.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── calico-node-sa.yml.j2
│ │ │ ├── calicoctl.etcd.sh.j2
│ │ │ ├── calico-ipamconfig.yml.j2
│ │ │ ├── calicoctl.kdd.sh.j2
│ │ │ ├── calico-crb.yml.j2
│ │ │ └── calico-apiserver-ns.yml.j2
│ │ └── rr
│ │ │ ├── defaults
│ │ │ └── main.yml
│ │ │ └── tasks
│ │ │ └── pre.yml
│ ├── cilium
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ ├── reset.yml
│ │ │ └── reset_iface.yml
│ │ └── templates
│ │ │ ├── cilium
│ │ │ ├── sa.yml.j2
│ │ │ └── secret.yml.j2
│ │ │ ├── cilium-operator
│ │ │ ├── sa.yml.j2
│ │ │ └── crb.yml.j2
│ │ │ └── 000-cilium-portmap.conflist.j2
│ ├── flannel
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── macvlan
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── 99-loopback.conf.j2
│ │ │ ├── coreos-device-macvlan.cfg.j2
│ │ │ ├── coreos-interface-macvlan.cfg.j2
│ │ │ ├── centos-postdown-macvlan.cfg.j2
│ │ │ ├── centos-postup-macvlan.cfg.j2
│ │ │ ├── coreos-service-nat_ouside.j2
│ │ │ ├── centos-routes-macvlan.cfg.j2
│ │ │ ├── centos-network-macvlan.cfg.j2
│ │ │ └── 10-macvlan.conf.j2
│ │ ├── OWNERS
│ │ ├── files
│ │ │ ├── ifup-local
│ │ │ └── ifdown-local
│ │ └── defaults
│ │ │ └── main.yml
│ ├── multus
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── files
│ │ │ ├── multus-serviceaccount.yml
│ │ │ └── multus-clusterrolebinding.yml
│ ├── weave
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── 10-weave.conflist.j2
│ ├── kube-router
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── OWNERS
│ ├── kube-ovn
│ │ └── OWNERS
│ ├── canal
│ │ ├── templates
│ │ │ ├── canal-node-sa.yml.j2
│ │ │ ├── calicoctl.sh.j2
│ │ │ ├── canal-crb-calico.yml.j2
│ │ │ └── canal-crb-flannel.yml.j2
│ │ └── handlers
│ │ │ └── main.yml
│ └── cni
│ │ └── tasks
│ │ └── main.yml
├── kubespray-defaults
│ ├── meta
│ │ └── main.yml
│ ├── vars
│ │ └── main.yml
│ └── tasks
│ │ └── fallback_ips_gather.yml
├── bootstrap-os
│ ├── handlers
│ │ └── main.yml
│ ├── molecule
│ │ └── default
│ │ │ ├── converge.yml
│ │ │ └── tests
│ │ │ └── test_default.py
│ └── tasks
│ │ ├── bootstrap-clearlinux.yml
│ │ └── bootstrap-amazon.yml
├── win_nodes
│ └── kubernetes_patch
│ │ └── defaults
│ │ └── main.yml
├── helm-apps
│ └── vars
│ │ └── main.yml
├── remove-node
│ ├── pre-remove
│ │ └── defaults
│ │ │ └── main.yml
│ └── post-remove
│ │ └── tasks
│ │ └── main.yml
├── adduser
│ ├── vars
│ │ ├── coreos.yml
│ │ ├── debian.yml
│ │ └── redhat.yml
│ └── molecule
│ │ └── default
│ │ └── converge.yml
├── upgrade
│ └── post-upgrade
│ │ └── defaults
│ │ └── main.yml
├── etcdctl
│ └── templates
│ │ └── etcdctl.sh.j2
└── etcd
│ ├── templates
│ ├── etcd-host.service.j2
│ └── etcd-events-host.service.j2
│ ├── meta
│ └── main.yml
│ └── tasks
│ └── refresh_config.yml
├── .markdownlint.yaml
├── contrib
├── dind
│ ├── requirements.txt
│ ├── group_vars
│ │ └── all
│ │ │ └── all.yaml
│ ├── dind-cluster.yaml
│ ├── roles
│ │ └── dind-host
│ │ │ └── templates
│ │ │ └── inventory_builder.sh.j2
│ ├── test-some_distros-most_CNIs.env
│ ├── hosts
│ └── test-some_distros-kube_router_combo.env
├── terraform
│ ├── metal
│ │ ├── hosts
│ │ ├── sample-inventory
│ │ │ └── group_vars
│ │ ├── versions.tf
│ │ └── output.tf
│ ├── openstack
│ │ ├── hosts
│ │ ├── sample-inventory
│ │ │ └── group_vars
│ │ ├── .gitignore
│ │ ├── modules
│ │ │ ├── compute
│ │ │ │ ├── versions.tf
│ │ │ │ └── ansible_bastion_template.txt
│ │ │ ├── network
│ │ │ │ ├── versions.tf
│ │ │ │ └── variables.tf
│ │ │ └── ips
│ │ │ │ └── versions.tf
│ │ └── versions.tf
│ ├── group_vars
│ ├── aws
│ │ ├── .gitignore
│ │ ├── sample-inventory
│ │ │ └── group_vars
│ │ ├── modules
│ │ │ ├── iam
│ │ │ │ ├── variables.tf
│ │ │ │ └── outputs.tf
│ │ │ ├── nlb
│ │ │ │ └── outputs.tf
│ │ │ └── vpc
│ │ │ │ └── outputs.tf
│ │ ├── docs
│ │ │ └── aws_kubespray.png
│ │ ├── credentials.tfvars.example
│ │ └── templates
│ │ │ └── inventory.tpl
│ ├── exoscale
│ │ ├── sample-inventory
│ │ │ └── group_vars
│ │ ├── modules
│ │ │ └── kubernetes-cluster
│ │ │ │ └── versions.tf
│ │ ├── versions.tf
│ │ ├── templates
│ │ │ └── inventory.tpl
│ │ └── output.tf
│ ├── upcloud
│ │ ├── sample-inventory
│ │ │ └── group_vars
│ │ ├── versions.tf
│ │ ├── modules
│ │ │ └── kubernetes-cluster
│ │ │ │ └── versions.tf
│ │ ├── output.tf
│ │ └── templates
│ │ │ └── inventory.tpl
│ ├── vsphere
│ │ ├── sample-inventory
│ │ │ └── group_vars
│ │ ├── modules
│ │ │ └── kubernetes-cluster
│ │ │ │ ├── templates
│ │ │ │ ├── cloud-init.tpl
│ │ │ │ └── metadata.tpl
│ │ │ │ ├── versions.tf
│ │ │ │ └── output.tf
│ │ ├── templates
│ │ │ └── inventory.tpl
│ │ └── versions.tf
│ ├── OWNERS
│ ├── hetzner
│ │ ├── output.tf
│ │ ├── modules
│ │ │ └── kubernetes-cluster
│ │ │ │ ├── versions.tf
│ │ │ │ └── templates
│ │ │ │ └── cloud-init.tmpl
│ │ ├── templates
│ │ │ └── inventory.tpl
│ │ └── versions.tf
│ └── gcp
│ │ └── output.tf
├── azurerm
│ ├── .gitignore
│ ├── generate-inventory.yml
│ ├── generate-templates.yml
│ ├── generate-inventory_2.yml
│ ├── roles
│ │ ├── generate-templates
│ │ │ └── templates
│ │ │ │ └── clear-rg.json
│ │ ├── generate-inventory_2
│ │ │ └── templates
│ │ │ │ └── loadbalancer_vars.j2
│ │ └── generate-inventory
│ │ │ └── tasks
│ │ │ └── main.yml
│ └── clear-rg.sh
├── aws_inventory
│ └── requirements.txt
├── network-storage
│ ├── heketi
│ │ ├── requirements.txt
│ │ ├── roles
│ │ │ ├── provision
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yml
│ │ │ │ ├── handlers
│ │ │ │ │ └── main.yml
│ │ │ │ ├── tasks
│ │ │ │ │ └── cleanup.yml
│ │ │ │ └── templates
│ │ │ │ │ ├── heketi-service-account.json.j2
│ │ │ │ │ └── storageclass.yml.j2
│ │ │ └── tear-down-disks
│ │ │ │ └── defaults
│ │ │ │ └── main.yml
│ │ ├── heketi.yml
│ │ └── heketi-tear-down.yml
│ └── glusterfs
│ │ ├── group_vars
│ │ └── roles
│ │ ├── bootstrap-os
│ │ ├── glusterfs
│ │ ├── server
│ │ │ ├── templates
│ │ │ │ └── test-file.txt
│ │ │ ├── vars
│ │ │ │ ├── Debian.yml
│ │ │ │ └── RedHat.yml
│ │ │ └── tasks
│ │ │ │ └── setup-RedHat.yml
│ │ └── client
│ │ │ ├── tasks
│ │ │ └── setup-RedHat.yml
│ │ │ └── defaults
│ │ │ └── main.yml
│ │ └── kubernetes-pv
│ │ ├── meta
│ │ └── main.yaml
│ │ └── ansible
│ │ └── templates
│ │ ├── glusterfs-kubernetes-endpoint-svc.json.j2
│ │ └── glusterfs-kubernetes-pv.yml.j2
├── offline
│ ├── docker-daemon.json
│ └── registries.conf
├── os-services
│ ├── roles
│ │ └── prepare
│ │ │ └── defaults
│ │ │ └── main.yml
│ └── os-services.yml
├── inventory_builder
│ ├── test-requirements.txt
│ ├── requirements.txt
│ └── setup.cfg
├── kvm-setup
│ ├── group_vars
│ │ └── all
│ ├── kvm-setup.yml
│ └── README.md
├── aws_iam
│ ├── kubernetes-master-role.json
│ └── kubernetes-minion-role.json
└── misc
│ └── clusteradmin-rbac.yml
├── tests
├── requirements.txt
├── local_inventory
│ └── hosts.cfg
├── files
│ ├── packet_ubuntu20-calico-aio-ansible-2_11.yml
│ ├── vagrant_ubuntu18-flannel.yml
│ ├── vagrant_ubuntu20-flannel.yml
│ ├── vagrant_ubuntu18-weave-medium.yml
│ ├── packet_debian11-calico.yml
│ ├── packet_amazon-linux-2-aio.yml
│ ├── vagrant_ubuntu18-calico-dual-stack.yml
│ ├── tf-elastx_ubuntu18-calico.yml
│ ├── packet_ubuntu18-calico-aio.yml
│ ├── packet_ubuntu18-calico-ha-recover.yml
│ ├── packet_ubuntu18-calico-ha-recover-noquorum.yml
│ ├── packet_fedora34-kube-ovn.yml
│ ├── packet_centos7-multus-calico.yml
│ ├── tf-ovh_ubuntu18-calico.yml
│ ├── vagrant_fedora35-kube-router.yml
│ ├── packet_almalinux8-calico-remove-node.yml
│ ├── packet_ubuntu16-canal-ha.yml
│ ├── packet_almalinux8-kube-ovn.yml
│ ├── packet_almalinux8-crio.yml
│ ├── packet_ubuntu16-canal-sep.yml
│ ├── vagrant_centos7-kube-router.yml
│ ├── vagrant_ubuntu16-kube-router-sep.yml
│ ├── packet_debian10-docker.yml
│ ├── packet_debian11-docker.yml
│ ├── packet_oracle7-canal-ha.yml
│ ├── packet_ubuntu18-crio.yml
│ ├── packet_opensuse-canal.yml
│ ├── packet_debian10-calico.yml
│ ├── vagrant_ubuntu18-weave-medium.rb
│ ├── packet_ubuntu18-cilium-sep.yml
│ ├── packet_ubuntu18-aio-docker.yml
│ ├── packet_debian10-cilium-svc-proxy.yml
│ ├── vagrant_ubuntu16-kube-router-svc-proxy.yml
│ ├── packet_almalinux8-calico-nodelocaldns-secondary.yml
│ ├── packet_almalinux8-docker.yml
│ ├── packet_opensuse-docker-cilium.yml
│ ├── packet_debian9-macvlan.yml
│ ├── vagrant_ubuntu18-flannel.rb
│ ├── packet_almalinux8-calico.yml
│ ├── packet_fedora34-docker-weave.yml
│ ├── packet_centos7-calico-ha.yml
│ ├── packet_ubuntu16-flannel-ha.yml
│ ├── vagrant_ubuntu18-calico-dual-stack.rb
│ ├── vagrant_ubuntu20-flannel.rb
│ ├── packet_almalinux8-calico-ha-ebpf.yml
│ ├── vagrant_ubuntu16-kube-router-svc-proxy.rb
│ ├── packet_ubuntu20-calico-aio.yml
│ ├── packet_ubuntu22-calico-aio.yml
│ ├── packet_debian10-calico-upgrade.yml
│ ├── packet_centos7-docker-weave-upgrade-ha.yml
│ ├── packet_ubuntu16-docker-weave-sep.yml
│ ├── packet_centos7-calico-ha-once-localhost.yml
│ ├── vagrant_centos7-kube-router.rb
│ ├── vagrant_fedora35-kube-router.rb
│ └── vagrant_ubuntu16-kube-router-sep.rb
├── scripts
│ ├── md-table
│ │ ├── requirements.txt
│ │ └── test.sh
│ ├── delete-tf.sh
│ ├── create-tf.sh
│ ├── testcases_cleanup.sh
│ ├── terraform_install.sh
│ ├── molecule_logs.sh
│ ├── vagrant-validate.sh
│ └── vagrant_clean.sh
├── common
│ └── _kubespray_test_settings.yml
├── templates
│ └── fake_hosts.yml.j2
├── cloud_playbooks
│ ├── templates
│ │ ├── gcs_life.json.j2
│ │ └── boto.j2
│ ├── wait-for-ssh.yml
│ ├── create-packet.yml
│ └── delete-packet.yml
├── requirements-2.11.txt
├── requirements-2.12.txt
├── run-tests.sh
└── ansible.cfg
├── inventory
├── local
│ ├── group_vars
│ └── hosts.ini
├── prod
│ └── group_vars
│ │ ├── all
│ │ ├── download.yml
│ │ ├── coreos.yml
│ │ ├── cri-o.yml
│ │ └── aws.yml
│ │ ├── k8s_cluster
│ │ ├── k8s-net-cilium.yml
│ │ ├── k8s-net-macvlan.yml
│ │ └── k8s-net-canal.yml
│ │ └── kube_ingress.yml
├── s000
│ └── group_vars
│ │ ├── all
│ │ ├── download.yml
│ │ ├── coreos.yml
│ │ ├── cri-o.yml
│ │ └── aws.yml
│ │ ├── k8s_cluster
│ │ ├── k8s-net-cilium.yml
│ │ ├── k8s-net-macvlan.yml
│ │ └── k8s-net-canal.yml
│ │ └── kube_ingress.yml
└── sample
│ └── group_vars
│ ├── all
│ ├── coreos.yml
│ ├── cri-o.yml
│ └── aws.yml
│ └── k8s_cluster
│ ├── k8s-net-macvlan.yml
│ └── k8s-net-canal.yml
├── scripts
├── openstack-cleanup
│ ├── .gitignore
│ └── requirements.txt
└── gitlab-branch-cleanup
│ ├── .gitignore
│ └── requirements.txt
├── _config.yml
├── logo
├── logos.pdf
├── logo-clear.png
├── logo-dark.png
├── OWNERS
├── logo-text-clear.png
├── logo-text-dark.png
├── logo-text-mixed.png
└── LICENSE
├── docs
├── img
│ └── kubernetes-logo.png
├── figures
│ ├── kubespray-calico-rr.png
│ └── loadbalancer_localhost.png
├── roadmap.md
├── calico_peer_example
│ ├── paris.yml
│ └── new-york.yml
├── opensuse.md
└── cloud.md
├── test-infra
├── image-builder
│ ├── Makefile
│ ├── hosts.ini
│ ├── cluster.yml
│ ├── OWNERS
│ └── roles
│ │ └── kubevirt-images
│ │ └── templates
│ │ └── Dockerfile
└── vagrant-docker
│ └── build.sh
├── code-of-conduct.md
├── OWNERS
├── Makefile
├── requirements-2.11.txt
├── requirements-2.12.txt
├── .github
└── ISSUE_TEMPLATE
│ ├── enhancement.md
│ └── failing-test.md
├── _scale_cluster.sh
├── _upgrade_cluster.sh
├── .editorconfig
├── .yamllint
├── _deploy_cluster.sh
└── OWNERS_ALIASES

/.gitmodules:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.nojekyll:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | kubespray.io
--------------------------------------------------------------------------------
/extra_playbooks/roles:
--------------------------------------------------------------------------------
1 | ../roles
--------------------------------------------------------------------------------
/extra_playbooks/inventory:
--------------------------------------------------------------------------------
1 | ../inventory
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requirements-2.12.txt
--------------------------------------------------------------------------------
/roles/kubernetes-apps/helm/.gitkeep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.markdownlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | MD013: false
3 | 
--------------------------------------------------------------------------------
/contrib/dind/requirements.txt:
--------------------------------------------------------------------------------
1 | docker
2 | 
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | requirements-2.12.txt
--------------------------------------------------------------------------------
/contrib/terraform/metal/hosts:
--------------------------------------------------------------------------------
1 | ../terraform.py
--------------------------------------------------------------------------------
/contrib/terraform/openstack/hosts:
--------------------------------------------------------------------------------
1 | ../terraform.py
--------------------------------------------------------------------------------
/inventory/local/group_vars:
--------------------------------------------------------------------------------
1 | ../sample/group_vars
--------------------------------------------------------------------------------
/roles/kubernetes/secrets/files/certs/.gitkeep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/roles/kubernetes/secrets/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
--------------------------------------------------------------------------------
/scripts/openstack-cleanup/.gitignore:
--------------------------------------------------------------------------------
1 | openrc
2 | 
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | theme: jekyll-theme-slate
3 | 
--------------------------------------------------------------------------------
/contrib/azurerm/.gitignore:
--------------------------------------------------------------------------------
1 | .generated
2 | /inventory
--------------------------------------------------------------------------------
/contrib/aws_inventory/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3 # Apache-2.0
--------------------------------------------------------------------------------
/roles/container-engine/cri-o/vars/rocky-8.yml:
--------------------------------------------------------------------------------
1 | centos-8.yml
--------------------------------------------------------------------------------
/contrib/network-storage/heketi/requirements.txt:
--------------------------------------------------------------------------------
1 | jmespath
2 | 
--------------------------------------------------------------------------------
/contrib/terraform/group_vars:
--------------------------------------------------------------------------------
1 | ../../inventory/local/group_vars
--------------------------------------------------------------------------------
/roles/container-engine/cri-o/vars/almalinux-8.yml:
--------------------------------------------------------------------------------
1 | centos-8.yml
--------------------------------------------------------------------------------
/roles/container-engine/cri-o/vars/oraclelinux-8.yml:
--------------------------------------------------------------------------------
1 | centos-8.yml
--------------------------------------------------------------------------------
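A note on the three one-line vars files above: in the kubespray repository, rocky-8.yml, almalinux-8.yml and oraclelinux-8.yml appear to be symlinks to centos-8.yml, and this dump renders a symlink as a single line holding its target (the same convention as ../terraform.py above). A quick way to confirm in a checkout, sketched here as a hypothetical session:

    # If the entry is a symlink, readlink prints its target
    readlink roles/container-engine/cri-o/vars/rocky-8.yml   # expected output: centos-8.yml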
/roles/download/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | allow_duplicates: true
3 | 
--------------------------------------------------------------------------------
/scripts/gitlab-branch-cleanup/.gitignore:
--------------------------------------------------------------------------------
1 | openrc
2 | venv
3 | 
--------------------------------------------------------------------------------
/scripts/gitlab-branch-cleanup/requirements.txt:
--------------------------------------------------------------------------------
1 | python-gitlab
2 | 
--------------------------------------------------------------------------------
/scripts/openstack-cleanup/requirements.txt:
--------------------------------------------------------------------------------
1 | openstacksdk>=0.43.0
2 | 
--------------------------------------------------------------------------------
/contrib/network-storage/heketi/roles/provision/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
--------------------------------------------------------------------------------
/tests/local_inventory/hosts.cfg:
--------------------------------------------------------------------------------
1 | localhost ansible_connection=local
2 | 
--------------------------------------------------------------------------------
/inventory/prod/group_vars/all/download.yml:
--------------------------------------------------------------------------------
1 | download_run_once: True
2 | 
3 | 
--------------------------------------------------------------------------------
/inventory/s000/group_vars/all/download.yml:
--------------------------------------------------------------------------------
1 | download_run_once: True
2 | 
3 | 
--------------------------------------------------------------------------------
/roles/kubernetes-apps/helm/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | helm_enabled: false
3 | 
--------------------------------------------------------------------------------
/contrib/network-storage/glusterfs/group_vars:
--------------------------------------------------------------------------------
1 | ../../../inventory/local/group_vars
--------------------------------------------------------------------------------
/contrib/network-storage/glusterfs/roles/bootstrap-os:
--------------------------------------------------------------------------------
1 | ../../../../roles/bootstrap-os
--------------------------------------------------------------------------------
/contrib/offline/docker-daemon.json:
--------------------------------------------------------------------------------
1 | { "insecure-registries":["HOSTNAME:5000"] }
2 | 
--------------------------------------------------------------------------------
/roles/kubernetes/secrets/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kube_cert_group: kube-cert
3 | 
--------------------------------------------------------------------------------
/contrib/terraform/aws/.gitignore:
--------------------------------------------------------------------------------
1 | *.tfstate*
2 | .terraform.lock.hcl
3 | .terraform
4 | 
--------------------------------------------------------------------------------
/contrib/terraform/aws/sample-inventory/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../inventory/sample/group_vars
--------------------------------------------------------------------------------
/logo/logos.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/logo/logos.pdf
--------------------------------------------------------------------------------
/roles/container-engine/cri-o/files/mounts.conf:
--------------------------------------------------------------------------------
1 | /usr/share/rhel/secrets:/run/secrets
2 | 
--------------------------------------------------------------------------------
/roles/container-engine/crun/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | crun_bin_dir: /usr/bin/
4 | 
--------------------------------------------------------------------------------
/tests/files/packet_ubuntu20-calico-aio-ansible-2_11.yml:
--------------------------------------------------------------------------------
1 | packet_ubuntu20-calico-aio.yml
--------------------------------------------------------------------------------
/contrib/terraform/exoscale/sample-inventory/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../inventory/sample/group_vars
--------------------------------------------------------------------------------
/contrib/terraform/metal/sample-inventory/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../inventory/sample/group_vars
--------------------------------------------------------------------------------
/contrib/terraform/upcloud/sample-inventory/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../inventory/sample/group_vars/
--------------------------------------------------------------------------------
/contrib/terraform/vsphere/sample-inventory/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../inventory/sample/group_vars
--------------------------------------------------------------------------------
/roles/bastion-ssh-config/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ssh_bastion_confing__name: ssh-bastion.conf
--------------------------------------------------------------------------------
/contrib/network-storage/glusterfs/roles/glusterfs/server/templates/test-file.txt:
--------------------------------------------------------------------------------
1 | test file
2 | 
--------------------------------------------------------------------------------
/contrib/os-services/roles/prepare/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | disable_service_firewall: false
3 | 
--------------------------------------------------------------------------------
/contrib/terraform/openstack/sample-inventory/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../inventory/sample/group_vars
--------------------------------------------------------------------------------
/logo/logo-clear.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/logo/logo-clear.png
--------------------------------------------------------------------------------
/logo/logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/logo/logo-dark.png
--------------------------------------------------------------------------------
/roles/container-engine/containerd-common/vars/suse.yml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_package: containerd
3 | 
--------------------------------------------------------------------------------
/roles/container-engine/youki/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | youki_bin_dir: "{{ bin_dir }}"
4 | 
--------------------------------------------------------------------------------
/roles/recover_control_plane/control-plane/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | bin_dir: /usr/local/bin
3 | 
--------------------------------------------------------------------------------
/roles/reset/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | flush_iptables: true
3 | reset_restart_network: true
4 | 
--------------------------------------------------------------------------------
/contrib/inventory_builder/test-requirements.txt:
--------------------------------------------------------------------------------
1 | hacking>=0.10.2
2 | pytest>=2.8.0
3 | mock>=1.3.0
4 | 
--------------------------------------------------------------------------------
/contrib/os-services/os-services.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 |   roles:
4 |     - { role: prepare }
5 | 
--------------------------------------------------------------------------------
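For reference, a one-role playbook like this is run directly against an inventory; a hypothetical invocation, which also overrides the disable_service_firewall default shown earlier:

    # -e overrides the default from contrib/os-services/roles/prepare/defaults/main.yml
    ansible-playbook -i inventory/local/hosts.ini contrib/os-services/os-services.yml \
      -e disable_service_firewall=true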
/logo/OWNERS:
--------------------------------------------------------------------------------
1 | # See the OWNERS docs at https://go.k8s.io/owners
2 | 
3 | approvers:
4 |   - thomeced
5 | 
--------------------------------------------------------------------------------
/roles/container-engine/containerd-common/vars/amazon.yml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_package: containerd
3 | 
--------------------------------------------------------------------------------
/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | gcp_pd_csi_controller_replicas: 1
3 | 
--------------------------------------------------------------------------------
/roles/kubernetes/node/vars/fedora.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
3 | 
--------------------------------------------------------------------------------
/roles/network_plugin/calico/vars/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | calico_wireguard_packages:
3 |   - wireguard
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/cilium/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: network_plugin/cni
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/flannel/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: network_plugin/cni
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/macvlan/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: network_plugin/cni
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/multus/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: network_plugin/cni
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/weave/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: network_plugin/cni
4 | 
--------------------------------------------------------------------------------
/contrib/inventory_builder/requirements.txt:
--------------------------------------------------------------------------------
1 | configparser>=3.3.0
2 | ruamel.yaml>=0.15.88
3 | ipaddress
4 | 
--------------------------------------------------------------------------------
/contrib/inventory_builder/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = kubespray-inventory-builder
3 | version = 0.1
4 | 
--------------------------------------------------------------------------------
/contrib/network-storage/heketi/roles/tear-down-disks/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | heketi_remove_lvm: false
3 | 
--------------------------------------------------------------------------------
/logo/logo-text-clear.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/logo/logo-text-clear.png
--------------------------------------------------------------------------------
/logo/logo-text-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/logo/logo-text-dark.png
--------------------------------------------------------------------------------
/logo/logo-text-mixed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/logo/logo-text-mixed.png
--------------------------------------------------------------------------------
/roles/kubernetes/node/vars/ubuntu-18.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
3 | 
--------------------------------------------------------------------------------
/roles/kubernetes/node/vars/ubuntu-20.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
3 | 
--------------------------------------------------------------------------------
/roles/kubernetes/node/vars/ubuntu-22.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kube_resolv_conf: "/run/systemd/resolve/resolv.conf"
3 | 
--------------------------------------------------------------------------------
/roles/network_plugin/calico/vars/centos-9.yml:
--------------------------------------------------------------------------------
1 | ---
2 | calico_wireguard_packages:
3 |   - wireguard-tools
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/calico/vars/fedora.yml:
--------------------------------------------------------------------------------
1 | ---
2 | calico_wireguard_packages:
3 |   - wireguard-tools
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/calico/vars/opensuse.yml:
--------------------------------------------------------------------------------
1 | ---
2 | calico_wireguard_packages:
3 |   - wireguard-tools
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/calico/vars/redhat-9.yml:
--------------------------------------------------------------------------------
1 | ---
2 | calico_wireguard_packages:
3 |   - wireguard-tools
4 | 
--------------------------------------------------------------------------------
/roles/network_plugin/kube-router/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: network_plugin/cni
4 | 
--------------------------------------------------------------------------------
/tests/files/vagrant_ubuntu18-flannel.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubespray settings
3 | kube_network_plugin: flannel
4 | 
--------------------------------------------------------------------------------
/tests/files/vagrant_ubuntu20-flannel.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubespray settings
3 | kube_network_plugin: flannel
4 | 
--------------------------------------------------------------------------------
/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | glusterfs_daemon: glusterd
3 | 
--------------------------------------------------------------------------------
/contrib/network-storage/glusterfs/roles/glusterfs/server/vars/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | glusterfs_daemon: glusterd
3 | 
--------------------------------------------------------------------------------
/inventory/prod/group_vars/k8s_cluster/k8s-net-cilium.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/cilium/defaults/main.yml
2 | 
--------------------------------------------------------------------------------
/inventory/s000/group_vars/k8s_cluster/k8s-net-cilium.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/cilium/defaults/main.yml
2 | 
--------------------------------------------------------------------------------
/roles/container-engine/cri-o/vars/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | crio_packages:
3 |   - cri-o
4 |   - oci-systemd-hook
5 | 
--------------------------------------------------------------------------------
/roles/kubernetes/preinstall/files/dhclient_nodnsupdate:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | make_resolv_conf() {
3 |     :
4 | }
5 | 
--------------------------------------------------------------------------------
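The hook above works because ISC dhclient's dhclient-script sources its enter hooks before rewriting /etc/resolv.conf, and performs that rewrite by calling the shell function make_resolv_conf; redefining the function as a no-op therefore keeps DHCP leases from clobbering DNS settings. A sketch of how such a hook is typically installed, assuming the Debian-style hooks directory (the path varies by distro):

    # dhclient-script sources every file in this directory before updating resolv.conf
    install -m 0644 dhclient_nodnsupdate /etc/dhcp/dhclient-enter-hooks.d/nodnsupdate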
/tests/files/vagrant_ubuntu18-weave-medium.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubespray settings
3 | kube_network_plugin: weave
4 | 
--------------------------------------------------------------------------------
/tests/scripts/md-table/requirements.txt:
--------------------------------------------------------------------------------
1 | pyaml
2 | jinja2
3 | pathlib ; python_version < '3.10'
4 | pydblite
5 | 
--------------------------------------------------------------------------------
/docs/img/kubernetes-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/docs/img/kubernetes-logo.png
--------------------------------------------------------------------------------
/roles/container-engine/cri-o/vars/fedora-36.yml:
--------------------------------------------------------------------------------
1 | ---
2 | crio_packages:
3 |   - cri-o
4 | 
5 | crio_version: 1.24
6 | 
--------------------------------------------------------------------------------
/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # TODO: Handle Calico etcd -> kdd migration
3 | 
--------------------------------------------------------------------------------
/tests/files/packet_debian11-calico.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Instance settings
3 | cloud_image: debian-11
4 | mode: default
5 | 
--------------------------------------------------------------------------------
/contrib/dind/group_vars/all/all.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # See distro.yaml for supported node_distro images
3 | node_distro: debian
4 | 
--------------------------------------------------------------------------------
/contrib/kvm-setup/group_vars/all:
--------------------------------------------------------------------------------
1 | #k8s_deployment_user: kubespray
2 | #k8s_deployment_user_pkey_path: /tmp/ssh_rsa
3 | 
4 | 
--------------------------------------------------------------------------------
/roles/container-engine/docker/vars/clearlinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_package_info:
3 |   pkgs:
4 |     - "containers-basic"
5 | 
--------------------------------------------------------------------------------
/roles/kubernetes/node/templates/cloud-configs/gce-cloud-config.j2:
--------------------------------------------------------------------------------
1 | [global]
2 | node-tags = {{ gce_node_tags }}
3 | 
4 | 
--------------------------------------------------------------------------------
/tests/files/packet_amazon-linux-2-aio.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Instance settings
3 | cloud_image: amazon-linux-2
4 | mode: aio
5 | 
--------------------------------------------------------------------------------
/tests/files/vagrant_ubuntu18-calico-dual-stack.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubespray settings
3 | enable_dual_stack_networks: true
4 | 
--------------------------------------------------------------------------------
/contrib/terraform/aws/modules/iam/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_cluster_name" {
2 |   description = "Name of Cluster"
3 | }
4 | 
--------------------------------------------------------------------------------
/inventory/prod/group_vars/all/coreos.yml:
--------------------------------------------------------------------------------
1 | ## Does coreos need auto upgrade, default is true
2 | # coreos_auto_upgrade: true
3 | 
--------------------------------------------------------------------------------
/inventory/s000/group_vars/all/coreos.yml:
--------------------------------------------------------------------------------
1 | ## Does coreos need auto upgrade, default is true
2 | # coreos_auto_upgrade: true
3 | 
--------------------------------------------------------------------------------
/inventory/sample/group_vars/all/coreos.yml:
--------------------------------------------------------------------------------
1 | ## Does coreos need auto upgrade, default is true
2 | # coreos_auto_upgrade: true
3 | 
--------------------------------------------------------------------------------
/roles/container-engine/runc/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | runc_bin_dir: "{{ bin_dir }}"
4 | 
5 | runc_package_name: runc
6 | 
--------------------------------------------------------------------------------
/contrib/network-storage/heketi/roles/provision/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "stop port forwarding"
3 |   command: "killall "
4 | 
--------------------------------------------------------------------------------
/contrib/terraform/OWNERS:
--------------------------------------------------------------------------------
1 | # See the OWNERS docs at https://go.k8s.io/owners
2 | 
3 | approvers:
4 |   - holmsten
5 |   - miouge1
6 | 
--------------------------------------------------------------------------------
/docs/figures/kubespray-calico-rr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/docs/figures/kubespray-calico-rr.png
--------------------------------------------------------------------------------
/roles/network_plugin/calico/vars/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | calico_wireguard_packages:
3 |   - wireguard-dkms
4 |   - wireguard-tools
5 | 
--------------------------------------------------------------------------------
/test-infra/image-builder/Makefile:
--------------------------------------------------------------------------------
1 | deploy:
2 | 	ansible-playbook -i hosts.ini -e docker_password=$(docker_password) cluster.yml
3 | 
--------------------------------------------------------------------------------
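The deploy target above takes docker_password as a make variable, so $(docker_password) expands from the command line; a hypothetical invocation from test-infra/image-builder (value redacted):

    # The value is forwarded to ansible-playbook as -e docker_password=...
    make deploy docker_password=REDACTED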
/test-infra/image-builder/hosts.ini:
--------------------------------------------------------------------------------
1 | image-builder-1 ansible_ssh_host=xxx.xxx.xxx.xxx
2 | 
3 | [image-builder]
4 | image-builder-1
5 | 
--------------------------------------------------------------------------------
/contrib/azurerm/generate-inventory.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 |   gather_facts: False
4 |   roles:
5 |     - generate-inventory
6 | 
--------------------------------------------------------------------------------
/contrib/azurerm/generate-templates.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 |   gather_facts: False
4 |   roles:
5 |     - generate-templates
6 | 
--------------------------------------------------------------------------------
/docs/figures/loadbalancer_localhost.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/docs/figures/loadbalancer_localhost.png
--------------------------------------------------------------------------------
/roles/container-engine/docker/templates/docker-orphan-cleanup.conf.j2:
--------------------------------------------------------------------------------
1 | [Service]
2 | ExecStartPost=-{{ bin_dir }}/cleanup-docker-orphans.sh
--------------------------------------------------------------------------------
/roles/kubernetes-apps/cluster_roles/templates/namespace.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: "kube-system"
5 | 
--------------------------------------------------------------------------------
/roles/kubernetes/preinstall/vars/suse.yml:
--------------------------------------------------------------------------------
1 | ---
2 | required_pkgs:
3 |   - device-mapper
4 |   - conntrack-tools
5 |   - libseccomp2
6 | 
--------------------------------------------------------------------------------
/roles/network_plugin/kube-ovn/OWNERS:
--------------------------------------------------------------------------------
1 | # See the OWNERS docs at https://go.k8s.io/owners
2 | 
3 | emeritus_approvers:
4 |   - oilbeater
5 | 
--------------------------------------------------------------------------------
/test-infra/image-builder/cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: image-builder
3 |   gather_facts: false
4 |   roles:
5 |     - kubevirt-images
6 | 
--------------------------------------------------------------------------------
/tests/common/_kubespray_test_settings.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubespray settings for tests
3 | deploy_netchecker: true
4 | dns_min_replicas: 1
5 | 
--------------------------------------------------------------------------------
/tests/files/tf-elastx_ubuntu18-calico.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sonobuoy_enabled: true
3 | 
4 | # Ignore ping errors
5 | ignore_assert_errors: true
6 | 
--------------------------------------------------------------------------------
/contrib/azurerm/generate-inventory_2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 |   gather_facts: False
4 |   roles:
5 |     - generate-inventory_2
6 | 
--------------------------------------------------------------------------------
/contrib/terraform/openstack/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform
2 | *.tfvars
3 | !sample-inventory\/cluster.tfvars
4 | *.tfstate
5 | *.tfstate.backup
6 | 
--------------------------------------------------------------------------------
/roles/kubernetes-apps/metallb/OWNERS:
--------------------------------------------------------------------------------
1 | # See the OWNERS docs at https://go.k8s.io/owners
2 | 
3 | approvers:
4 | reviewers:
5 |   - oomichi
6 | 
--------------------------------------------------------------------------------
/tests/files/packet_ubuntu18-calico-aio.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Instance settings
3 | cloud_image: ubuntu-1804
4 | mode: aio
5 | vm_memory: 1600Mi
6 | 
--------------------------------------------------------------------------------
/contrib/network-storage/glusterfs/roles/kubernetes-pv/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - {role: kubernetes-pv/ansible, tags: apps}
4 | 
--------------------------------------------------------------------------------
/contrib/terraform/aws/docs/aws_kubespray.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/southbridgeio/kubespray/HEAD/contrib/terraform/aws/docs/aws_kubespray.png
--------------------------------------------------------------------------------
/roles/container-engine/docker/vars/suse.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_package_info:
3 |   state: latest
4 |   pkgs:
5 |     - docker
6 |     - containerd
7 | 
--------------------------------------------------------------------------------
/roles/kubespray-defaults/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |   - role: download
4 |     skip_downloads: true
5 |     tags:
6 |       - facts
7 | 
--------------------------------------------------------------------------------
/roles/network_plugin/macvlan/templates/99-loopback.conf.j2:
--------------------------------------------------------------------------------
1 | {
2 |   "cniVersion": "0.2.0",
3 |   "name": "lo",
4 |   "type": "loopback"
5 | }
6 | 
--------------------------------------------------------------------------------
/roles/network_plugin/macvlan/templates/coreos-device-macvlan.cfg.j2:
--------------------------------------------------------------------------------
1 | [NetDev]
2 | Name=mac0
3 | Kind=macvlan
4 | 
5 | [MACVLAN]
6 | Mode=bridge
7 | 
--------------------------------------------------------------------------------
/tests/templates/fake_hosts.yml.j2:
--------------------------------------------------------------------------------
1 | ansible_default_ipv4:
2 |   address: 255.255.255.255
3 | ansible_hostname: "{{ '{{' }}inventory_hostname}}"
4 | 
--------------------------------------------------------------------------------
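Worth noting in the template above: in Jinja2, {{ '{{' }} renders a literal {{, so the first templating pass leaves an unexpanded expression behind for a later pass to resolve. Assuming standard Jinja2 semantics, the rendered output would be:

    ansible_default_ipv4:
      address: 255.255.255.255
    ansible_hostname: "{{inventory_hostname}}"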
5 | terraform -chdir="contrib/terraform/$PROVIDER" destroy -auto-approve 6 | -------------------------------------------------------------------------------- /logo/LICENSE: -------------------------------------------------------------------------------- 1 | # The Kubespray logo files are licensed under a choice of either Apache-2.0 or CC-BY-4.0 (Creative Commons Attribution 4.0 International). 2 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: check.yml 3 | 4 | - include_tasks: install.yml 5 | 6 | - include_tasks: apply.yml 7 | -------------------------------------------------------------------------------- /roles/bootstrap-os/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: RHEL auto-attach subscription 3 | command: /sbin/subscription-manager attach --auto 4 | become: true 5 | -------------------------------------------------------------------------------- /roles/bootstrap-os/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | gather_facts: no 5 | roles: 6 | - role: bootstrap-os 7 | -------------------------------------------------------------------------------- /roles/container-engine/crictl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install crictl 3 | include_tasks: crictl.yml 4 | when: not crictl_installed | default(false) 5 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | reviewers: 5 | - alijahnas 6 | - luckySB 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/snapshots/snapshot-controller/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | snapshot_controller_replicas: 1 3 | snapshot_controller_namespace: kube-system 4 | -------------------------------------------------------------------------------- /extra_playbooks/wait-for-cloud-init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | tasks: 4 | - name: Wait for cloud-init to finish 5 | command: cloud-init status --wait 6 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/argocd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argocd_enabled: false 3 | argocd_version: v2.1.6 4 | argocd_namespace: argocd 5 | # argocd_admin_password: 6 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/network_plugin/kube-router/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - bozzo 5 | reviewers: 6 | - bozzo -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/OWNERS:
-------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - alijahnas 5 | reviewers: 6 | -------------------------------------------------------------------------------- /roles/kubernetes/node/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubernetes/secrets 4 | when: not kubeadm_enabled 5 | tags: 6 | - k8s-secrets 7 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/templates/cilium/sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: cilium 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/coreos-interface-macvlan.cfg.j2: -------------------------------------------------------------------------------- 1 | [Match] 2 | Name={{ macvlan_interface }} 3 | 4 | [Network] 5 | MACVLAN=mac0 6 | DHCP=yes 7 | -------------------------------------------------------------------------------- /tests/scripts/create-tf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | cd .. 5 | terraform -chdir="contrib/terraform/$PROVIDER" apply -auto-approve -parallelism=1 6 | -------------------------------------------------------------------------------- /inventory/prod/group_vars/kube_ingress.yml: -------------------------------------------------------------------------------- 1 | node_labels: 2 | node-role.kubernetes.io/ingress: "" 3 | node_taints: 4 | - "node-role.kubernetes.io/ingress=:NoSchedule" 5 | -------------------------------------------------------------------------------- /inventory/s000/group_vars/kube_ingress.yml: -------------------------------------------------------------------------------- 1 | node_labels: 2 | node-role.kubernetes.io/ingress: "" 3 | node_taints: 4 | - "node-role.kubernetes.io/ingress=:NoSchedule" 5 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: False 5 | roles: 6 | - role: bootstrap-os 7 | -------------------------------------------------------------------------------- /roles/container-engine/docker/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: False 5 | roles: 6 | - role: bootstrap-os 7 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - pasqualet 5 | reviewers: 6 | - pasqualet 7 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubernetes/tokens 4 | when: kube_token_auth 5 | tags: 6 | - k8s-secrets 7 | 
-------------------------------------------------------------------------------- /roles/network_plugin/canal/templates/canal-node-sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: canal 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /roles/network_plugin/multus/files/multus-serviceaccount.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: multus 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu18-calico-ha-recover-noquorum.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1804 4 | mode: ha-recover-noquorum 5 | vm_memory: 1600Mi 6 | -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Community Code of Conduct 2 | 3 | Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) 4 | -------------------------------------------------------------------------------- /roles/container-engine/crictl/templates/crictl.yaml.j2: -------------------------------------------------------------------------------- 1 | runtime-endpoint: unix://{{ cri_socket }} 2 | image-endpoint: unix://{{ cri_socket }} 3 | timeout: 30 4 | debug: false 5 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/snapshots/cinder-csi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | snapshot_classes: 3 | - name: cinder-csi-snapshot 4 | is_default: false 5 | force_create: true 6 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - libselinux-python 4 | - device-mapper-libs 5 | - nss 6 | - conntrack-tools 7 | - libseccomp 8 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/templates/calico-node-sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: calico-node 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /contrib/kvm-setup/kvm-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: False 4 | become: yes 5 | vars: 6 | - bootstrap_os: none 7 | roles: 8 | - kvm-setup 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/registry/templates/registry-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: registry 5 | namespace: {{ registry_namespace }} 6 | -------------------------------------------------------------------------------- /test-infra/image-builder/OWNERS: 
-------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - woopstar 5 | - ant31 6 | reviewers: 7 | - woopstar 8 | - ant31 9 | -------------------------------------------------------------------------------- /tests/files/packet_fedora34-kube-ovn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: fedora-34 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin: kube-ovn 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_runtimes/crun/templates/runtimeclass-crun.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: RuntimeClass 3 | apiVersion: node.k8s.io/v1 4 | metadata: 5 | name: crun 6 | handler: crun 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_cloud_controller/openstack/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | reviewers: 5 | - alijahnas 6 | - luckySB 7 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # list of admission plugins that need to be configured 3 | kube_apiserver_admission_plugins_needs_configuration: [EventRateLimit] 4 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/fedora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - libselinux-python3 4 | - device-mapper-libs 5 | - conntrack 6 | - container-selinux 7 | - libseccomp 8 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/templates/cilium-operator/sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: cilium-operator 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/files/ifup-local: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | POSTUPNAME="/etc/sysconfig/network-scripts/post-up-$1" 4 | if [ -x $POSTUPNAME ]; then 5 | exec $POSTUPNAME 6 | fi 7 | -------------------------------------------------------------------------------- /roles/win_nodes/kubernetes_patch/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests" 4 | kube_proxy_nodeselector: "kubernetes.io/os" 5 | -------------------------------------------------------------------------------- /tests/files/packet_centos7-multus-calico.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: centos-7 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin_multus: true 8 | -------------------------------------------------------------------------------- /tests/files/tf-ovh_ubuntu18-calico.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | sonobuoy_enabled: true 3 | pkg_install_retries: 25 4 | retry_stagger: 10 5 | 6 | # Ignore ping errors 7 | ignore_assert_errors: true 8 | -------------------------------------------------------------------------------- /tests/files/vagrant_fedora35-kube-router.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: fedora-35 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin: kube-router 8 | -------------------------------------------------------------------------------- /roles/helm-apps/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | helm_defaults: 3 | atomic: true 4 | binary_path: "{{ bin_dir }}/helm" 5 | 6 | helm_repository_defaults: 7 | binary_path: "{{ bin_dir }}/helm" 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/argocd/templates/argocd-namespace.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{argocd_namespace}} 6 | labels: 7 | app: argocd 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_runtimes/youki/templates/runtimeclass-youki.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: RuntimeClass 3 | apiVersion: node.k8s.io/v1 4 | metadata: 5 | name: youki 6 | handler: youki 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/krew/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | krew_enabled: false 3 | krew_root_dir: "/usr/local/krew" 4 | krew_default_index_uri: https://github.com/kubernetes-sigs/krew-index.git 5 | -------------------------------------------------------------------------------- /roles/recover_control_plane/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - qvicksilver 5 | - yujunz 6 | reviewers: 7 | - qvicksilver 8 | - yujunz 9 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-calico-remove-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: ha 5 | 6 | # Kubespray settings 7 | auto_renew_certificates: true 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-agent-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: netchecker-agent 5 | namespace: {{ netcheck_namespace }} 6 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-server-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: netchecker-server 5 | namespace: {{ netcheck_namespace }} 6 | -------------------------------------------------------------------------------- 
/roles/kubernetes-apps/container_runtimes/gvisor/templates/runtimeclass-gvisor.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: RuntimeClass 3 | apiVersion: node.k8s.io/v1 4 | metadata: 5 | name: gvisor 6 | handler: runsc 7 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/templates/ansible_git.j2: -------------------------------------------------------------------------------- 1 | ; This file contains the information which identifies the deployment state relative to the git repo 2 | [default] 3 | {{ gitinfo.stdout }} 4 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/files/ifdown-local: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | POSTDOWNNAME="/etc/sysconfig/network-scripts/post-down-$1" 4 | if [ -x $POSTDOWNNAME ]; then 5 | exec $POSTDOWNNAME 6 | fi 7 | -------------------------------------------------------------------------------- /roles/remove-node/pre-remove/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | allow_ungraceful_removal: false 3 | drain_grace_period: 300 4 | drain_timeout: 360s 5 | drain_retries: 3 6 | drain_retry_delay_seconds: 10 7 | -------------------------------------------------------------------------------- /tests/cloud_playbooks/templates/gcs_life.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "rule": 3 | [ 4 | { 5 | "action": {"type": "Delete"}, 6 | "condition": {"age": {{expire_days}}} 7 | } 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu16-canal-ha.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1604 4 | mode: ha 5 | 6 | # Kubespray settings 7 | calico_datastore: etcd 8 | kube_network_plugin: canal 9 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - kubespray-approvers 5 | reviewers: 6 | - kubespray-reviewers 7 | emeritus_approvers: 8 | - kubespray-emeritus_approvers -------------------------------------------------------------------------------- /contrib/terraform/hetzner/output.tf: -------------------------------------------------------------------------------- 1 | output "master_ips" { 2 | value = module.kubernetes.master_ip_addresses 3 | } 4 | 5 | output "worker_ips" { 6 | value = module.kubernetes.worker_ip_addresses 7 | } 8 | -------------------------------------------------------------------------------- /contrib/terraform/metal/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | required_providers { 5 | metal = { 6 | source = "equinix/metal" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /inventory/sample/group_vars/all/cri-o.yml: -------------------------------------------------------------------------------- 1 | # crio_insecure_registries: 2 | # - 10.0.0.2:5000 3 | # crio_registry_auth: 4 | # - registry: 10.0.0.2:5000 5 | # username: user 6 | # password: pass 7 | 
-------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-ns.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: "{{ netcheck_namespace }}" 5 | labels: 6 | name: "{{ netcheck_namespace }}" 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/azuredisk-csi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Available values: Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS 3 | storage_account_type: StandardSSD_LRS 4 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: adduser 4 | user: "{{ addusers.kube }}" 5 | when: 6 | - not is_fedora_coreos 7 | tags: 8 | - kubelet 9 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - python3-apt 4 | - apt-transport-https 5 | - software-properties-common 6 | - conntrack 7 | - apparmor 8 | - libseccomp2 9 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-kube-ovn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: default 5 | vm_memory: 3072Mi 6 | 7 | # Kubespray settings 8 | kube_network_plugin: kube-ovn 9 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/vars/clearlinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | crio_packages: 3 | - containers-basic 4 | 5 | crio_conmon: /usr/libexec/crio/conmon 6 | crio_seccomp_profile: /usr/share/defaults/crio/seccomp.json 7 | -------------------------------------------------------------------------------- /roles/container-engine/docker/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | roles: 6 | - role: kubespray-defaults 7 | - role: container-engine/docker 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/centos-7.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nvidia_driver_install_container: "{{ nvidia_driver_install_centos_container }}" 3 | nvidia_driver_install_supported: true 4 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/alb_ingress_controller/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - kubespray-approvers 5 | reviewers: 6 | - kubespray-reviewers -------------------------------------------------------------------------------- /roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: calico-kube-controllers 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/registry/templates/registry-ns.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ registry_namespace }} 6 | labels: 7 | name: {{ registry_namespace }} 8 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-crio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: default 5 | 6 | # Kubespray settings 7 | container_manager: crio 8 | auto_renew_certificates: true 9 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu16-canal-sep.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1604 4 | mode: separate 5 | 6 | # Kubespray settings 7 | calico_datastore: etcd 8 | kube_network_plugin: canal 9 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | mitogen: 2 | @echo Mitogen support is deprecated. 3 | @echo Please run the following command manually: 4 | @echo ansible-playbook -c local mitogen.yml -vv 5 | clean: 6 | rm -rf dist/ 7 | rm *.retry 8 | -------------------------------------------------------------------------------- /contrib/dind/dind-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: False 4 | roles: 5 | - { role: dind-host } 6 | 7 | - hosts: containers 8 | roles: 9 | - { role: dind-cluster } 10 | -------------------------------------------------------------------------------- /contrib/terraform/vsphere/modules/kubernetes-cluster/templates/cloud-init.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | ssh_authorized_keys: 4 | %{ for ssh_public_key in ssh_public_keys ~} 5 | - ${ssh_public_key} 6 | %{ endfor ~} 7 | -------------------------------------------------------------------------------- /roles/adduser/vars/coreos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | addusers: 3 | - name: kube 4 | comment: "Kubernetes user" 5 | shell: /sbin/nologin 6 | system: yes 7 | group: "{{ kube_cert_group }}" 8 | createhome: no 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-16.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}" 3 | nvidia_driver_install_supported: true 4 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/vars/ubuntu-18.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nvidia_driver_install_container: "{{ nvidia_driver_install_ubuntu_container }}" 3 | 
nvidia_driver_install_supported: true 4 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-ns.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ local_path_provisioner_namespace }} 6 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/templates/k8s-certs-renew.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Renew K8S control plane certificates 3 | 4 | [Service] 5 | Type=oneshot 6 | ExecStart={{ bin_dir }}/k8s-certs-renew.sh 7 | -------------------------------------------------------------------------------- /tests/files/vagrant_centos7-kube-router.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: centos-7 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin: kube-router 8 | enable_network_policy: true 9 | -------------------------------------------------------------------------------- /contrib/network-storage/heketi/roles/provision/tasks/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Clean up left over jobs." 3 | command: "{{ bin_dir }}/kubectl delete jobs,pods --selector=\"deploy-heketi\"" 4 | changed_when: false 5 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/upcloud-csi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | expand_persistent_volumes: true 3 | parameters: 4 | tier: maxiops 5 | storage_classes: 6 | - name: standard 7 | is_default: true 8 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | macvlan_interface: eth0 3 | enable_nat_default_gateway: true 4 | 5 | # sysctl_file_path to add sysctl conf to 6 | sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" 7 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu16-kube-router-sep.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1604 4 | mode: separate 5 | 6 | # Kubespray settings 7 | bootstrap_os: ubuntu 8 | kube_network_plugin: kube-router 9 | -------------------------------------------------------------------------------- /contrib/network-storage/heketi/roles/provision/templates/heketi-service-account.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ServiceAccount", 4 | "metadata": { 5 | "name": "heketi-service-account" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /roles/adduser/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | gather_facts: false 6 | roles: 7 | - role: adduser 8 | vars: 9 | user: 10 | name: foo 11 | 
-------------------------------------------------------------------------------- /inventory/prod/group_vars/k8s_cluster/k8s-net-macvlan.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # private interface, on an L2 network 3 | macvlan_interface: "eth1" 4 | 5 | # Enable NAT on the default gateway network interface 6 | enable_nat_default_gateway: true 7 | -------------------------------------------------------------------------------- /inventory/s000/group_vars/k8s_cluster/k8s-net-macvlan.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # private interface, on an L2 network 3 | macvlan_interface: "eth1" 4 | 5 | # Enable NAT on the default gateway network interface 6 | enable_nat_default_gateway: true 7 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/templates/containerd-shim-kata-v2.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | KATA_CONF_FILE={{ kata_containers_config_dir }}/configuration-{{ shim }}.toml {{ kata_containers_dir }}/bin/containerd-shim-kata-v2 $@ 3 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | kata_containers_qemu_overhead: true 4 | kata_containers_qemu_overhead_fixed_cpu: 250m 5 | kata_containers_qemu_overhead_fixed_memory: 160Mi 6 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/openstack/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | persistent_volumes_enabled: false 3 | storage_classes: 4 | - name: standard 5 | is_default: true 6 | parameters: 7 | availability: nova 8 | -------------------------------------------------------------------------------- /roles/kubernetes/node/tasks/loadbalancer/kube-vip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: kube-vip | Write static pod 3 | template: 4 | src: manifests/kube-vip.manifest.j2 5 | dest: "{{ kube_manifest_dir }}/kube-vip.yml" 6 | mode: 0640 7 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/templates/cilium/secret.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | keys: {{ cilium_ipsec_key }} 5 | kind: Secret 6 | metadata: 7 | name: cilium-ipsec-keys 8 | namespace: kube-system 9 | type: Opaque -------------------------------------------------------------------------------- /test-infra/image-builder/roles/kubevirt-images/templates/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM kubevirt/registry-disk-v1alpha 2 | 3 | ARG cloud_image 4 | MAINTAINER "The Kubespray Project" 5 | 6 | COPY $cloud_image /disk 7 | -------------------------------------------------------------------------------- /inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # private interface, on an L2 network 3 | macvlan_interface: "eth1" 4 | 5 | # Enable NAT on the default gateway network interface 6 | enable_nat_default_gateway: true 7 |
-------------------------------------------------------------------------------- /roles/container-engine/containerd/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: container-engine/containerd-common 4 | - role: container-engine/runc 5 | - role: container-engine/crictl 6 | - role: container-engine/nerdctl 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: coredns 6 | namespace: kube-system 7 | labels: 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/sa-rbd-provisioner.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: rbd-provisioner 6 | namespace: {{ rbd_provisioner_namespace }} 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/cinder-csi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | storage_classes: 3 | - name: cinder-csi 4 | is_default: false 5 | parameters: 6 | availability: nova 7 | allowVolumeExpansion: false 8 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - python-apt 4 | - aufs-tools 5 | - apt-transport-https 6 | - software-properties-common 7 | - conntrack 8 | - apparmor 9 | - libseccomp2 10 | -------------------------------------------------------------------------------- /contrib/network-storage/heketi/heketi.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: heketi-node 3 | roles: 4 | - { role: prepare } 5 | 6 | - hosts: kube_control_plane[0] 7 | tags: 8 | - "provision" 9 | roles: 10 | - { role: provision } 11 | -------------------------------------------------------------------------------- /contrib/terraform/openstack/modules/compute/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | openstack = { 4 | source = "terraform-provider-openstack/openstack" 5 | } 6 | } 7 | required_version = ">= 0.12.26" 8 | } 9 | -------------------------------------------------------------------------------- /contrib/terraform/openstack/modules/network/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | openstack = { 4 | source = "terraform-provider-openstack/openstack" 5 | } 6 | } 7 | required_version = ">= 0.12.26" 8 | } 9 | -------------------------------------------------------------------------------- /roles/container-engine/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: container-engine/containerd-common 4 | - role: container-engine/docker-storage 5 | when: docker_container_storage_setup and ansible_os_family == "RedHat" 6 | 
-------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/nodelocaldns-sa.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nodelocaldns 5 | namespace: kube-system 6 | labels: 7 | addonmanager.kubernetes.io/mode: Reconcile 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: alb-ingress 6 | namespace: {{ alb_ingress_controller_namespace }} 7 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/vars/amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | calico_wireguard_repo: https://download.copr.fedorainfracloud.org/results/jdoss/wireguard/epel-7-$basearch/ 3 | calico_wireguard_packages: 4 | - wireguard-dkms 5 | - wireguard-tools 6 | -------------------------------------------------------------------------------- /tests/files/packet_debian10-docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: debian-10 4 | mode: default 5 | 6 | # Use docker 7 | container_manager: docker 8 | etcd_deployment_type: docker 9 | resolvconf_mode: docker_dns 10 | -------------------------------------------------------------------------------- /tests/files/packet_debian11-docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: debian-11 4 | mode: default 5 | 6 | # Use docker 7 | container_manager: docker 8 | etcd_deployment_type: docker 9 | resolvconf_mode: docker_dns 10 | -------------------------------------------------------------------------------- /tests/files/packet_oracle7-canal-ha.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: oracle-7 4 | mode: ha 5 | 6 | # Kubespray settings 7 | calico_datastore: etcd 8 | kube_network_plugin: canal 9 | auto_renew_certificates: true 10 | -------------------------------------------------------------------------------- /contrib/network-storage/heketi/heketi-tear-down.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: kube_control_plane[0] 3 | roles: 4 | - { role: tear-down } 5 | 6 | - hosts: heketi-node 7 | become: yes 8 | roles: 9 | - { role: tear-down-disks } 10 | -------------------------------------------------------------------------------- /contrib/terraform/upcloud/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_providers { 4 | upcloud = { 5 | source = "UpCloudLtd/upcloud" 6 | version = "~>2.4.0" 7 | } 8 | } 9 | required_version = ">= 0.13" 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/aws_ebs/templates/aws-ebs-csi-driver.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: CSIDriver 4 | metadata: 5 | name: ebs.csi.aws.com 6 | spec: 7 | attachRequired: 
true 8 | podInfoOnMount: false 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-cred-secret.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Secret 3 | apiVersion: v1 4 | metadata: 5 | name: cloud-sa 6 | namespace: kube-system 7 | data: 8 | cloud-sa.json: {{ gcp_cred_secret.content }} 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-driver.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: csi.vsphere.vmware.com 5 | spec: 6 | attachRequired: true 7 | podInfoOnMount: false 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/00-namespace.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ ingress_nginx_namespace }} 6 | labels: 7 | name: {{ ingress_nginx_namespace }} 8 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu18-crio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1804 4 | mode: default 5 | 6 | # Kubespray settings 7 | container_manager: crio 8 | 9 | download_localhost: false 10 | download_run_once: true 11 | -------------------------------------------------------------------------------- /contrib/offline/registries.conf: -------------------------------------------------------------------------------- 1 | [registries.search] 2 | registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io'] 3 | 4 | [registries.insecure] 5 | registries = ['HOSTNAME:5000'] 6 | 7 | [registries.block] 8 | registries = [] 9 | -------------------------------------------------------------------------------- /contrib/terraform/aws/credentials.tfvars.example: -------------------------------------------------------------------------------- 1 | #AWS Access Key 2 | AWS_ACCESS_KEY_ID = "" 3 | #AWS Secret Key 4 | AWS_SECRET_ACCESS_KEY = "" 5 | #EC2 SSH Key Name 6 | AWS_SSH_KEY_NAME = "" 7 | #AWS Region 8 | AWS_DEFAULT_REGION = "eu-central-1" 9 | -------------------------------------------------------------------------------- /requirements-2.11.txt: -------------------------------------------------------------------------------- 1 | ansible==4.10.0 2 | ansible-core==2.11.11 3 | cryptography==3.3.2 4 | jinja2==2.11.3 5 | netaddr==0.7.19 6 | pbr==5.4.4 7 | jmespath==0.9.5 8 | ruamel.yaml==0.16.10 9 | ruamel.yaml.clib==0.2.6 10 | MarkupSafe==1.1.1 11 | -------------------------------------------------------------------------------- /requirements-2.12.txt: -------------------------------------------------------------------------------- 1 | ansible==5.7.1 2 | ansible-core==2.12.5 3 | cryptography==3.3.2 4 | jinja2==2.11.3 5 | netaddr==0.7.19 6 | pbr==5.4.4 7 | jmespath==0.9.5 8 | ruamel.yaml==0.16.10 9 | ruamel.yaml.clib==0.2.6 10 | MarkupSafe==1.1.1 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/sa-cephfs-provisioner.yml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: cephfs-provisioner 6 | namespace: {{ cephfs_provisioner_namespace }} 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/00-namespace.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ rbd_provisioner_namespace }} 6 | labels: 7 | name: {{ rbd_provisioner_namespace }} 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/snapshots/snapshot-controller/templates/snapshot-ns.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ snapshot_controller_namespace }} 6 | labels: 7 | name: {{ snapshot_controller_namespace }} 8 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/debian-11.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - python3-apt 4 | - gnupg 5 | - apt-transport-https 6 | - software-properties-common 7 | - conntrack 8 | - iptables 9 | - apparmor 10 | - libseccomp2 11 | -------------------------------------------------------------------------------- /tests/files/packet_opensuse-canal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: opensuse-leap-15 4 | mode: default 5 | 6 | # Kubespray settings 7 | calico_datastore: etcd 8 | kube_network_plugin: canal 9 | auto_renew_certificates: true 10 | -------------------------------------------------------------------------------- /contrib/terraform/aws/modules/iam/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kube_control_plane-profile" { 2 | value = aws_iam_instance_profile.kube_control_plane.name 3 | } 4 | 5 | output "kube-worker-profile" { 6 | value = aws_iam_instance_profile.kube-worker.name 7 | } 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/azuredisk/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | azure_csi_use_instance_metadata: true 3 | azure_csi_controller_replicas: 2 4 | azure_csi_plugin_image_tag: latest 5 | azure_csi_controller_affinity: {} 6 | azure_csi_node_affinity: {} 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-cloud-config-secret.yml.j2: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | apiVersion: v1 3 | metadata: 4 | name: cloud-config 5 | namespace: kube-system 6 | data: 7 | azure.json: {{ cloud_config_secret.content }} 8 | -------------------------------------------------------------------------------- /roles/kubernetes/node/templates/http-proxy.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %} 3 
| -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/centos-postdown-macvlan.cfg.j2: -------------------------------------------------------------------------------- 1 | {% if enable_nat_default_gateway %} 2 | iptables -t nat -D POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE 3 | {% endif %} 4 | 5 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/centos-postup-macvlan.cfg.j2: -------------------------------------------------------------------------------- 1 | {% if enable_nat_default_gateway %} 2 | iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE 3 | {% endif %} 4 | 5 | -------------------------------------------------------------------------------- /roles/upgrade/post-upgrade/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # how long to wait for cilium after upgrade before uncordoning 3 | upgrade_post_cilium_wait_timeout: 120s 4 | upgrade_node_post_upgrade_confirm: false 5 | upgrade_node_post_upgrade_pause_seconds: 0 6 | -------------------------------------------------------------------------------- /tests/files/packet_debian10-calico.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: debian-10 4 | mode: default 5 | 6 | # Kubespray settings 7 | auto_renew_certificates: true 8 | 9 | # plugins 10 | helm_enabled: true 11 | krew_enabled: true 12 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu18-weave-medium.rb: -------------------------------------------------------------------------------- 1 | $num_instances = 16 2 | $vm_memory ||= 1600 3 | $os = "ubuntu1804" 4 | $network_plugin = "weave" 5 | $kube_master_instances = 1 6 | $etcd_instances = 1 7 | $playbook = "tests/cloud_playbooks/wait-for-ssh.yml" 8 | -------------------------------------------------------------------------------- /roles/container-engine/containerd/vars/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | containerd_repo_info: 3 | repos: 4 | - > 5 | deb {{ containerd_debian_repo_base_url }} 6 | {{ ansible_distribution_release|lower }} 7 | {{ containerd_debian_repo_component }} 8 | -------------------------------------------------------------------------------- /roles/container-engine/containerd/vars/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | containerd_repo_info: 3 | repos: 4 | - > 5 | deb {{ containerd_ubuntu_repo_base_url }} 6 | {{ ansible_distribution_release|lower }} 7 | {{ containerd_ubuntu_repo_component }} 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/00-namespace.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ cephfs_provisioner_namespace }} 6 | labels: 7 | name: {{ cephfs_provisioner_namespace }} 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/metrics_server/templates/metrics-server-sa.yaml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | labels: 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu18-cilium-sep.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1804 4 | mode: separate 5 | 6 | # Kubespray settings 7 | kube_network_plugin: cilium 8 | enable_network_policy: true 9 | auto_renew_certificates: true 10 | -------------------------------------------------------------------------------- /tests/scripts/testcases_cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | cd tests && make delete-${CI_PLATFORM} -s ; cd - 5 | 6 | if [ -d ~/.ara ] ; then 7 | tar czvf ${CI_PROJECT_DIR}/cluster-dump/ara.tgz ~/.ara 8 | rm -fr ~/.ara 9 | fi 10 | -------------------------------------------------------------------------------- /contrib/terraform/exoscale/modules/kubernetes-cluster/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | exoscale = { 4 | source = "exoscale/exoscale" 5 | version = ">= 0.21" 6 | } 7 | } 8 | required_version = ">= 0.13" 9 | } 10 | -------------------------------------------------------------------------------- /contrib/terraform/hetzner/modules/kubernetes-cluster/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hcloud = { 4 | source = "hetznercloud/hcloud" 5 | version = "1.31.1" 6 | } 7 | } 8 | required_version = ">= 0.14" 9 | } 10 | -------------------------------------------------------------------------------- /contrib/terraform/openstack/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | openstack = { 4 | source = "terraform-provider-openstack/openstack" 5 | version = "~> 1.17" 6 | } 7 | } 8 | required_version = ">= 0.12.26" 9 | } 10 | -------------------------------------------------------------------------------- /contrib/terraform/vsphere/modules/kubernetes-cluster/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vsphere = { 4 | source = "hashicorp/vsphere" 5 | version = ">= 1.24.3" 6 | } 7 | } 8 | required_version = ">= 0.13" 9 | } 10 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | container_manager: crio 7 | roles: 8 | - role: kubespray-defaults 9 | - role: container-engine/cri-o 10 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/cloud_controller/oci/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | oci_security_list_management: All 4 | oci_use_instance_principals: false 5 | oci_cloud_controller_version: 0.7.0 6 | oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci 7 | 
-------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-driver.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: storage.csi.upcloud.com 5 | spec: 6 | attachRequired: true 7 | podInfoOnMount: true 8 | fsGroupPolicy: File -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-service-account.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ external_hcloud_cloud.service_account_name }} 6 | namespace: kube-system 7 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/templates/k8s-certs-renew.timer.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Timer to renew K8S control plane certificates 3 | 4 | [Timer] 5 | OnCalendar={{ auto_renew_certificates_systemd_calendar }} 6 | 7 | [Install] 8 | WantedBy=multi-user.target 9 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu18-aio-docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1804 4 | mode: aio 5 | vm_memory: 1600Mi 6 | 7 | # Use docker 8 | container_manager: docker 9 | etcd_deployment_type: docker 10 | resolvconf_mode: docker_dns 11 | -------------------------------------------------------------------------------- /contrib/aws_iam/kubernetes-master-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { "Service": "ec2.amazonaws.com"}, 7 | "Action": "sts:AssumeRole" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /contrib/aws_iam/kubernetes-minion-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { "Service": "ec2.amazonaws.com"}, 7 | "Action": "sts:AssumeRole" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /contrib/terraform/upcloud/modules/kubernetes-cluster/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_providers { 4 | upcloud = { 5 | source = "UpCloudLtd/upcloud" 6 | version = "~>2.4.0" 7 | } 8 | } 9 | required_version = ">= 0.13" 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: local-volume-provisioner 6 | namespace: {{ local_volume_provisioner_namespace }} 7 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/rr/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # Global as_num (/calico/bgp/v1/global/as_num) 3 | # should be the same as in calico role 4 | global_as_num: "64512" 5 | calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostname) }}" 6 | -------------------------------------------------------------------------------- /tests/files/packet_debian10-cilium-svc-proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: debian-10 4 | mode: ha 5 | 6 | # Kubespray settings 7 | kube_network_plugin: cilium 8 | enable_network_policy: true 9 | 10 | cilium_kube_proxy_replacement: strict 11 | -------------------------------------------------------------------------------- /roles/container-engine/youki/molecule/default/files/container.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "youki1" 4 | }, 5 | "image": { 6 | "image": "quay.io/kubespray/hello-world:latest" 7 | }, 8 | "log_path": "youki1.0.log", 9 | "linux": {} 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sa.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: local-path-provisioner-service-account 6 | namespace: {{ local_path_provisioner_namespace }} 7 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-ns.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ alb_ingress_controller_namespace }} 6 | labels: 7 | name: {{ alb_ingress_controller_namespace }} 8 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/tasks/reset.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reset | check and remove devices if still present 3 | include_tasks: reset_iface.yml 4 | vars: 5 | iface: "{{ item }}" 6 | loop: 7 | - cilium_host 8 | - cilium_net 9 | - cilium_vxlan 10 | -------------------------------------------------------------------------------- /roles/container-engine/containerd/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | container_manager: containerd 7 | roles: 8 | - role: kubespray-defaults 9 | - role: container-engine/containerd 10 | -------------------------------------------------------------------------------- /roles/container-engine/gvisor/molecule/default/files/container.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "gvisor1" 4 | }, 5 | "image": { 6 | "image": "quay.io/kubespray/hello-world:latest" 7 | }, 8 | "log_path": "gvisor1.0.log", 9 | "linux": {} 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes/client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubeconfig_localhost: false 3 | 
kubeconfig_localhost_ansible_host: false 4 | kubectl_localhost: false 5 | artifacts_dir: "{{ inventory_dir }}/artifacts" 6 | 7 | kube_config_dir: "/etc/kubernetes" 8 | kube_apiserver_port: "6443" 9 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu16-kube-router-svc-proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1604 4 | mode: separate 5 | 6 | # Kubespray settings 7 | bootstrap_os: ubuntu 8 | kube_network_plugin: kube-router 9 | 10 | kube_router_run_service_proxy: true 11 | -------------------------------------------------------------------------------- /docs/roadmap.md: -------------------------------------------------------------------------------- 1 | # Kubespray's roadmap 2 | 3 | We are tracking the evolution towards Kubespray 3.0 in [#6400](https://github.com/kubernetes-sigs/kubespray/issues/6400) as well as in other open issues in our [github issues](https://github.com/kubernetes-sigs/kubespray/issues/) section. 4 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/molecule/default/files/container.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "kata1" 4 | }, 5 | "image": { 6 | "image": "quay.io/kubespray/hello-world:latest" 7 | }, 8 | "log_path": "kata1.0.log", 9 | "linux": {} 10 | } 11 | -------------------------------------------------------------------------------- /roles/container-engine/youki/molecule/default/files/sandbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "youki1", 4 | "namespace": "default", 5 | "attempt": 1, 6 | "uid": "hdishd83djaidwnduwk28bcsb" 7 | }, 8 | "linux": {}, 9 | "log_directory": "/tmp" 10 | } 11 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-calico-nodelocaldns-secondary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: default 5 | vm_memory: 3072Mi 6 | 7 | # Kubespray settings 8 | enable_nodelocaldns_secondary: true 9 | loadbalancer_apiserver_type: haproxy 10 | -------------------------------------------------------------------------------- /contrib/dind/roles/dind-host/templates/inventory_builder.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # NOTE: if you change HOST_PREFIX, you also need to edit ./hosts [containers] section 3 | HOST_PREFIX=kube-node python3 contrib/inventory_builder/inventory.py {% for ip in addresses %} {{ ip }} {% endfor %} 4 | -------------------------------------------------------------------------------- /inventory/local/hosts.ini: -------------------------------------------------------------------------------- 1 | node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases 2 | 3 | [kube_control_plane] 4 | node1 5 | 6 | [etcd] 7 | node1 8 | 9 | [kube_node] 10 | node1 11 | 12 | [k8s_cluster:children] 13 | kube_node 14 | kube_control_plane 15 | -------------------------------------------------------------------------------- /roles/container-engine/gvisor/molecule/default/files/sandbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 |
"name": "gvisor1", 4 | "namespace": "default", 5 | "attempt": 1, 6 | "uid": "hdishd83djaidwnduwk28bcsb" 7 | }, 8 | "linux": {}, 9 | "log_directory": "/tmp" 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/centos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" 4 | - device-mapper-libs 5 | - nss 6 | - conntrack 7 | - container-selinux 8 | - libseccomp 9 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/vars/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | required_pkgs: 3 | - "{{ ( (ansible_distribution_major_version | int) < 8) | ternary('libselinux-python','python3-libselinux') }}" 4 | - device-mapper-libs 5 | - nss 6 | - conntrack 7 | - container-selinux 8 | - libseccomp 9 | -------------------------------------------------------------------------------- /tests/requirements-2.11.txt: -------------------------------------------------------------------------------- 1 | -r ../requirements-2.11.txt 2 | yamllint==1.19.0 3 | apache-libcloud==2.2.1 4 | tox==3.11.1 5 | dopy==0.3.7 6 | ansible-lint==5.4.0 7 | molecule==3.0.6 8 | molecule-vagrant==0.3 9 | testinfra==5.2.2 10 | python-vagrant==0.5.15 11 | ara[server]==1.5.7 12 | -------------------------------------------------------------------------------- /tests/requirements-2.12.txt: -------------------------------------------------------------------------------- 1 | -r ../requirements-2.12.txt 2 | yamllint==1.19.0 3 | apache-libcloud==2.2.1 4 | tox==3.11.1 5 | dopy==0.3.7 6 | ansible-lint==5.4.0 7 | molecule==3.0.6 8 | molecule-vagrant==0.3 9 | testinfra==5.2.2 10 | python-vagrant==0.5.15 11 | ara[server]==1.5.7 12 | -------------------------------------------------------------------------------- /contrib/azurerm/roles/generate-templates/templates/clear-rg.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": {}, 5 | "variables": {}, 6 | "resources": [], 7 | "outputs": {} 8 | } -------------------------------------------------------------------------------- /roles/container-engine/cri-dockerd/molecule/default/files/container.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "cri-dockerd1" 4 | }, 5 | "image": { 6 | "image": "quay.io/kubespray/hello-world:latest" 7 | }, 8 | "log_path": "cri-dockerd1.0.log", 9 | "linux": {} 10 | } 11 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/templates/http-proxy.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} 3 | -------------------------------------------------------------------------------- /roles/container-engine/docker/templates/http-proxy.conf.j2: -------------------------------------------------------------------------------- 1 | 
[Service] 2 | Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} 3 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/molecule/default/files/sandbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "kata1", 4 | "namespace": "default", 5 | "attempt": 1, 6 | "uid": "hdishd83djaidwnduwk28bcsb" 7 | }, 8 | "linux": {}, 9 | "log_directory": "/tmp" 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/container_engine_accelerator/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubernetes-apps/container_engine_accelerator/nvidia_gpu 4 | when: nvidia_accelerator_enabled 5 | tags: 6 | - apps 7 | - nvidia_gpu 8 | - container_engine_accelerator 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ local_volume_provisioner_namespace }} 6 | labels: 7 | name: {{ local_volume_provisioner_namespace }} 8 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/templates/resolvconf.j2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | write_files: 3 | - path: "/etc/resolv.conf" 4 | permissions: "0644" 5 | owner: "root" 6 | content: | 7 | {% for l in cloud_config.stdout_lines %} 8 | {{ l }} 9 | {% endfor %} 10 | # 11 | -------------------------------------------------------------------------------- /contrib/terraform/upcloud/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "master_ip" { 3 | value = module.kubernetes.master_ip 4 | } 5 | 6 | output "worker_ip" { 7 | value = module.kubernetes.worker_ip 8 | } 9 | 10 | output "loadbalancer_domain" { 11 | value = module.kubernetes.loadbalancer_domain 12 | } 13 | -------------------------------------------------------------------------------- /roles/container-engine/containerd/templates/http-proxy.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment={% if http_proxy is defined %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy is defined %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy is defined %}"NO_PROXY={{ no_proxy }}"{% endif %} 3 | -------------------------------------------------------------------------------- /roles/container-engine/cri-dockerd/molecule/default/files/sandbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "cri-dockerd1", 4 | "namespace": "default", 5 | "attempt": 1, 6 | "uid": "hdishd83djaidwnduwk28bcsb" 7 | }, 8 | "linux": {}, 9 | "log_directory": "/tmp" 10 | } 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/upcloud/templates/upcloud-csi-cred-secret.yml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: upcloud 6 | namespace: kube-system 7 | stringData: 8 | username: {{ upcloud_username }} 9 | password: {{ upcloud_password }} 10 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/krew/templates/krew.j2: -------------------------------------------------------------------------------- 1 | # krew bash env(kubespray) 2 | export KREW_ROOT="{{ krew_root_dir }}" 3 | {% if krew_default_index_uri is defined %} 4 | export KREW_DEFAULT_INDEX_URI='{{ krew_default_index_uri }}' 5 | {% endif %} 6 | export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" 7 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: default 5 | vm_memory: 3072Mi 6 | 7 | # Use docker 8 | container_manager: docker 9 | etcd_deployment_type: docker 10 | resolvconf_mode: docker_dns 11 | cri_dockerd_enabled: true 12 | -------------------------------------------------------------------------------- /tests/files/packet_opensuse-docker-cilium.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: opensuse-leap-15 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin: cilium 8 | 9 | # Docker specific settings: 10 | container_manager: docker 11 | etcd_deployment_type: docker 12 | -------------------------------------------------------------------------------- /roles/kubernetes/preinstall/tasks/0061-systemd-resolved.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Write resolved.conf 3 | template: 4 | src: resolved.conf.j2 5 | dest: /etc/systemd/resolved.conf 6 | owner: root 7 | group: root 8 | mode: 0644 9 | notify: Preinstall | Restart systemd-resolved 10 | -------------------------------------------------------------------------------- /contrib/terraform/aws/modules/nlb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "aws_nlb_api_id" { 2 | value = aws_lb.aws-nlb-api.id 3 | } 4 | 5 | output "aws_nlb_api_fqdn" { 6 | value = aws_lb.aws-nlb-api.dns_name 7 | } 8 | 9 | output "aws_nlb_api_tg_arn" { 10 | value = aws_lb_target_group.aws-nlb-api-tg.arn 11 | } 12 | -------------------------------------------------------------------------------- /contrib/terraform/openstack/modules/ips/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | null = { 4 | source = "hashicorp/null" 5 | } 6 | openstack = { 7 | source = "terraform-provider-openstack/openstack" 8 | } 9 | } 10 | required_version = ">= 0.12.26" 11 | } 12 | -------------------------------------------------------------------------------- /roles/container-engine/cri-dockerd/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | container_manager: docker 7 | cri_dockerd_enabled: true 8 | roles: 9 | - role: kubespray-defaults 10 | - role: container-engine/cri-dockerd 11 | -------------------------------------------------------------------------------- 
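The cri-dockerd molecule converge above mirrors a real deployment: the same two variables, set in cluster inventory, switch kubelet onto dockerd through the cri-dockerd shim (the packet_almalinux8-docker test file earlier in this section uses exactly this combination). A minimal sketch, assuming a standard group_vars layout; the file path is illustrative:

# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical path)
container_manager: docker   # run containers under dockerd
cri_dockerd_enabled: true   # expose dockerd to kubelet via the cri-dockerd shim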
/roles/network_plugin/canal/templates/calicoctl.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ETCD_ENDPOINTS={{ etcd_access_addresses }} \ 3 | ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ 4 | ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ 5 | ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ 6 | {{ bin_dir }}/calicoctl "$@" 7 | -------------------------------------------------------------------------------- /tests/cloud_playbooks/wait-for-ssh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: False 4 | gather_facts: False 5 | 6 | tasks: 7 | - name: Wait until SSH is available 8 | wait_for: 9 | host: "{{ ansible_host }}" 10 | port: 22 11 | timeout: 240 12 | delegate_to: localhost 13 | -------------------------------------------------------------------------------- /contrib/terraform/openstack/modules/compute/ansible_bastion_template.txt: -------------------------------------------------------------------------------- 1 | ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'" 2 | -------------------------------------------------------------------------------- /tests/scripts/terraform_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | apt-get install -y unzip 5 | curl https://releases.hashicorp.com/terraform/${TF_VERSION}/terraform_${TF_VERSION}_linux_amd64.zip > /tmp/terraform.zip 6 | unzip /tmp/terraform.zip && mv ./terraform /usr/local/bin/ && terraform --version 7 | -------------------------------------------------------------------------------- /contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-endpoint-svc.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Service", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "glusterfs" 6 | }, 7 | "spec": { 8 | "ports": [ 9 | {"port": 1} 10 | ] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/vars/fedora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | crio_packages: 3 | - cri-o 4 | 5 | crio_kubernetes_version_matrix: 6 | "1.24": "1.23" 7 | "1.23": "1.23" 8 | "1.22": "1.22" 9 | "1.21": "1.21" 10 | crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.23') }}" 11 | -------------------------------------------------------------------------------- /roles/etcdctl/templates/etcdctl.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # {{ ansible_managed }} 3 | # example invocation: etcdctl.sh get --keys-only --from-key "" 4 | 5 | etcdctl \ 6 | --cacert {{ kube_cert_dir }}/etcd/ca.crt \ 7 | --cert {{ kube_cert_dir }}/etcd/server.crt \ 8 | --key {{ kube_cert_dir }}/etcd/server.key "$@" 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-driver.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: cinder.csi.openstack.org 5 | 
spec: 6 | attachRequired: true 7 | podInfoOnMount: true 8 | volumeLifecycleModes: 9 | - Persistent 10 | - Ephemeral 11 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/coreos-service-nat_ouside.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Type=oneshot 3 | ExecStart=/bin/bash -c "iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE" 4 | 5 | [Install] 6 | WantedBy=sys-subsystem-net-devices-mac0.device 7 | -------------------------------------------------------------------------------- /tests/cloud_playbooks/create-packet.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | become: true 6 | vars: 7 | ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}" 8 | test_name: "{{ test_id | regex_replace('\\.', '-') }}" 9 | roles: 10 | - { role: packet-ci, vm_cleanup: false } 11 | -------------------------------------------------------------------------------- /tests/cloud_playbooks/delete-packet.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: no 5 | become: true 6 | vars: 7 | ci_job_name: "{{ lookup('env', 'CI_JOB_NAME') }}" 8 | test_name: "{{ test_id | regex_replace('\\.', '-') }}" 9 | roles: 10 | - { role: packet-ci, vm_cleanup: true } 11 | -------------------------------------------------------------------------------- /tests/files/packet_debian9-macvlan.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: debian-9 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin: macvlan 8 | enable_nodelocaldns: false 9 | kube_proxy_masquerade_all: true 10 | macvlan_interface: "eth0" 11 | auto_renew_certificates: true 12 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu18-flannel.rb: -------------------------------------------------------------------------------- 1 | # For CI we are not worried about data persistence across reboot 2 | $libvirt_volume_cache = "unsafe" 3 | 4 | # Checking for box update can trigger API rate limiting 5 | # https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html 6 | $box_check_update = false 7 | $vm_cpus = 2 -------------------------------------------------------------------------------- /roles/kubernetes-apps/krew/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Krew | install krew on kube_control_plane 3 | import_tasks: krew.yml 4 | 5 | - name: Krew | install krew on localhost 6 | import_tasks: krew.yml 7 | delegate_to: localhost 8 | connection: local 9 | run_once: true 10 | when: kubectl_localhost 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/gcp-pd-csi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Choose between pd-standard and pd-ssd 3 | gcp_pd_csi_volume_type: pd-standard 4 | gcp_pd_regional_replication_enabled: false 5 | gcp_pd_restrict_zone_replication: false 6 | gcp_pd_restricted_zones: 7 | - europe-west1-b 8 | - europe-west1-c 9 | --------------------------------------------------------------------------------
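The gcp-pd-csi defaults that close the file above are likewise inventory-tunable. A minimal override sketch, assuming the variable names from that defaults file; the pd-ssd choice and zone list are illustrative:

# group_vars override (hypothetical)
gcp_pd_csi_volume_type: pd-ssd           # switch from pd-standard to SSD-backed disks
gcp_pd_restrict_zone_replication: true   # only provision volumes in the zones below
gcp_pd_restricted_zones:
  - europe-west1-b
  - europe-west1-c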
/roles/kubernetes-apps/registry/templates/registry-secrets.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: registry-secret 5 | namespace: {{ registry_namespace }} 6 | type: Opaque 7 | data: 8 | {% if registry_htpasswd != "" %} 9 | htpasswd: {{ registry_htpasswd | b64encode }} 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-calico.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: default 5 | vm_memory: 3072Mi 6 | 7 | # Kubespray settings 8 | metrics_server_enabled: true 9 | dashboard_namespace: "kube-dashboard" 10 | dashboard_enabled: true 11 | loadbalancer_apiserver_type: haproxy 12 | -------------------------------------------------------------------------------- /tests/files/packet_fedora34-docker-weave.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: fedora-34 4 | mode: default 5 | 6 | # Kubespray settings 7 | kube_network_plugin: weave 8 | 9 | # Docker specific settings: 10 | container_manager: docker 11 | etcd_deployment_type: docker 12 | resolvconf_mode: docker_dns 13 | -------------------------------------------------------------------------------- /docs/calico_peer_example/paris.yml: -------------------------------------------------------------------------------- 1 | # --- 2 | # peers: 3 | # - router_id: "10.99.0.2" 4 | # as: "65xxx" 5 | # sourceaddress: "None" 6 | # - router_id: "10.99.0.3" 7 | # as: "65xxx" 8 | # sourceaddress: "None" 9 | 10 | # loadbalancer_apiserver: 11 | # address: "10.99.0.21" 12 | # port: "8383" 13 | -------------------------------------------------------------------------------- /extra_playbooks/files/get_cinder_pvs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | kubectl get pv -o go-template --template='{{ range .items }}{{ $metadata := .metadata }}{{ with $value := index .metadata.annotations "pv.kubernetes.io/provisioned-by" }}{{ if eq $value "kubernetes.io/cinder" }}{{printf "%s\n" $metadata.name}}{{ end }}{{ end }}{{ end }}' 3 | -------------------------------------------------------------------------------- /roles/container-engine/cri-dockerd/templates/cri-dockerd.socket.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=CRI Docker Socket for the API 3 | PartOf=cri-dockerd.service 4 | 5 | [Socket] 6 | ListenStream=%t/cri-dockerd.sock 7 | SocketMode=0660 8 | SocketUser=root 9 | SocketGroup=docker 10 | 11 | [Install] 12 | WantedBy=sockets.target 13 | -------------------------------------------------------------------------------- /roles/container-engine/youki/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | youki_enabled: true 7 | container_manager: crio 8 | roles: 9 | - role: kubespray-defaults 10 | - role: container-engine/cri-o 11 | - role: container-engine/youki 12 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrole.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 
| apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: netchecker-server 5 | namespace: {{ netcheck_namespace }} 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["list", "get"] 10 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/cluster_roles/files/k8s-cluster-critical-pc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: scheduling.k8s.io/v1 3 | kind: PriorityClass 4 | metadata: 5 | name: k8s-cluster-critical 6 | value: 1000000000 7 | globalDefault: false 8 | description: "This priority class should only be used by the pods installed using kubespray." 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/alb_ingress_controller/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | alb_ingress_controller_namespace: kube-system 3 | alb_ingress_aws_region: "us-east-1" 4 | 5 | # Enables logging on all outbound requests sent to the AWS API. 6 | # If logging is desired, set to true. 7 | alb_ingress_aws_debug: "false" 8 | -------------------------------------------------------------------------------- /tests/scripts/molecule_logs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Ensure a clean environment 4 | rm -fr molecule_logs 5 | mkdir -p molecule_logs 6 | 7 | # Collect and archive the logs 8 | find ~/.cache/molecule/ -name \*.out -o -name \*.err -type f | xargs tar -uf molecule_logs/molecule.tar 9 | gzip molecule_logs/molecule.tar 10 | -------------------------------------------------------------------------------- /tests/scripts/vagrant-validate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | curl -sL "https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb" -o "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb" 5 | dpkg -i "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb" 6 | vagrant validate --ignore-provider 7 | -------------------------------------------------------------------------------- /docs/calico_peer_example/new-york.yml: -------------------------------------------------------------------------------- 1 | # --- 2 | # peers: 3 | # - router_id: "10.99.0.34" 4 | # as: "65xxx" 5 | # sourceaddress: "None" 6 | # - router_id: "10.99.0.35" 7 | # as: "65xxx" 8 | # sourceaddress: "None" 9 | 10 | # loadbalancer_apiserver: 11 | # address: "10.99.0.44" 12 | # port: "8383" 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancement.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Enhancement Request 3 | about: Suggest an enhancement to the Kubespray project 4 | labels: kind/feature 5 | 6 | --- 7 | 8 | 9 | **What would you like to be added**: 10 | 11 | **Why is this needed**: 12 | -------------------------------------------------------------------------------- /contrib/terraform/upcloud/templates/inventory.tpl: -------------------------------------------------------------------------------- 1 | 2 | [all] 3 | ${connection_strings_master} 4 | ${connection_strings_worker} 5 | 6 | [kube_control_plane] 7 | ${list_master} 8 | 9 | [etcd] 10 | ${list_master} 11 | 12 | [kube_node] 13 | ${list_worker} 14 | 15 | [k8s_cluster:children] 16
kube_control_plane 17 | kube_node 18 | -------------------------------------------------------------------------------- /contrib/terraform/vsphere/templates/inventory.tpl: -------------------------------------------------------------------------------- 1 | 2 | [all] 3 | ${connection_strings_master} 4 | ${connection_strings_worker} 5 | 6 | [kube_control_plane] 7 | ${list_master} 8 | 9 | [etcd] 10 | ${list_master} 11 | 12 | [kube_node] 13 | ${list_worker} 14 | 15 | [k8s_cluster:children] 16 | kube_control_plane 17 | kube_node 18 | -------------------------------------------------------------------------------- /roles/container-engine/docker/templates/docker-dns.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="DOCKER_DNS_OPTIONS=\ 3 | {% for d in docker_dns_servers %}--dns {{ d }} {% endfor %} \ 4 | {% for d in docker_dns_search_domains %}--dns-search {{ d }} {% endfor %} \ 5 | {% for o in docker_dns_options %}--dns-opt {{ o }} {% endfor %} \ 6 | " -------------------------------------------------------------------------------- /roles/container-engine/docker/templates/fedora_docker.repo.j2: -------------------------------------------------------------------------------- 1 | [docker-ce] 2 | name=Docker-CE Repository 3 | baseurl={{ docker_fedora_repo_base_url }} 4 | enabled=1 5 | gpgcheck={{ '1' if docker_fedora_repo_gpgkey else '0' }} 6 | gpgkey={{ docker_fedora_repo_gpgkey }} 7 | {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} 8 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/secret-cephfs-provisioner.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Secret 3 | apiVersion: v1 4 | metadata: 5 | name: cephfs-provisioner 6 | namespace: {{ cephfs_provisioner_namespace }} 7 | type: Opaque 8 | data: 9 | secret: {{ cephfs_provisioner_secret | b64encode }} 10 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/templates/calicoctl.etcd.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ETCD_ENDPOINTS={{ etcd_access_addresses }} \ 3 | ETCD_CA_CERT_FILE={{ calico_cert_dir }}/ca_cert.crt \ 4 | ETCD_CERT_FILE={{ calico_cert_dir }}/cert.crt \ 5 | ETCD_KEY_FILE={{ calico_cert_dir }}/key.pem \ 6 | {{ bin_dir }}/calicoctl --allow-version-mismatch "$@" 7 | -------------------------------------------------------------------------------- /test-infra/vagrant-docker/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | if [ "$#" -ne 1 ]; then 5 | echo "Usage: $0 tag" >&2 6 | exit 1 7 | fi 8 | 9 | VERSION="$1" 10 | IMG="quay.io/kubespray/vagrant:${VERSION}" 11 | 12 | docker build .
--build-arg "KUBESPRAY_VERSION=${VERSION}" --tag "$IMG" 13 | docker push "$IMG" 14 | -------------------------------------------------------------------------------- /tests/cloud_playbooks/templates/boto.j2: -------------------------------------------------------------------------------- 1 | [Credentials] 2 | gs_access_key_id = {{ gs_key }} 3 | gs_secret_access_key = {{ gs_skey }} 4 | [Boto] 5 | https_validate_certificates = True 6 | [GoogleCompute] 7 | [GSUtil] 8 | default_project_id = {{ gce_project_id }} 9 | content_language = en 10 | default_api_version = 2 11 | [OAuth2] 12 | -------------------------------------------------------------------------------- /roles/container-engine/gvisor/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | gvisor_enabled: true 7 | container_manager: containerd 8 | roles: 9 | - role: kubespray-defaults 10 | - role: container-engine/containerd 11 | - role: container-engine/gvisor 12 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/azuredisk/templates/azure-csi-azuredisk-driver.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: CSIDriver 4 | metadata: 5 | name: disk.csi.azure.com 6 | spec: 7 | attachRequired: true 8 | podInfoOnMount: true 9 | volumeLifecycleModes: # added in Kubernetes 1.16 10 | - Persistent 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/sa-ingress-nginx.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: ingress-nginx 6 | namespace: {{ ingress_nginx_namespace }} 7 | labels: 8 | app.kubernetes.io/name: ingress-nginx 9 | app.kubernetes.io/part-of: ingress-nginx 10 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/templates/calico-ipamconfig.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: crd.projectcalico.org/v1 2 | kind: IPAMConfig 3 | metadata: 4 | name: default 5 | spec: 6 | autoAllocateBlocks: {{ calcio_ipam_autoallocateblocks }} 7 | strictAffinity: {{ calico_ipam_strictaffinity }} 8 | maxBlocksPerHost: {{ calico_ipam_maxblocksperhost }} 9 | -------------------------------------------------------------------------------- /_scale_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ###ssh-agent bash 4 | #ssh-add ~/.ssh/id_rsa 5 | 6 | if [ -z "$1" ]; then 7 | echo "Usage: $0 adminname" 8 | exit 1 9 | fi 10 | 11 | d=$(date '+%Y.%m.%d_%H:%M') 12 | 13 | export ANSIBLE_LOG_PATH=./deploy-$d.log 14 | ansible-playbook -u "$1" -i inventory/s000000/inventory.ini scale.yml -b --diff 15 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/registry/templates/registry-cm.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: registry-config 5 | namespace: {{ registry_namespace }} 6 | {% if registry_config %} 7 | data: 8 | config.yml: |- 9 | {{ registry_config | to_yaml(indent=2, width=1337) | indent(width=4) }} 10 | {% endif %} 
11 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/templates/calicoctl.kdd.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DATASTORE_TYPE=kubernetes \ 3 | {% if inventory_hostname in groups['kube_control_plane'] %} 4 | KUBECONFIG=/etc/kubernetes/admin.conf \ 5 | {% else %} 6 | KUBECONFIG=/etc/cni/net.d/calico-kubeconfig \ 7 | {% endif %} 8 | {{ bin_dir }}/calicoctl --allow-version-mismatch "$@" 9 | -------------------------------------------------------------------------------- /tests/files/packet_centos7-calico-ha.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: centos-7 4 | mode: ha 5 | 6 | # Kubespray settings 7 | download_localhost: false 8 | download_run_once: true 9 | typha_enabled: true 10 | calico_apiserver_enabled: true 11 | calico_backend: kdd 12 | typha_secure: true 13 | auto_renew_certificates: true 14 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu16-flannel-ha.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1604 4 | mode: ha 5 | 6 | # Kubespray settings 7 | kube_network_plugin: flannel 8 | etcd_deployment_type: kubeadm 9 | kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085 10 | skip_non_kubeadm_warning: true 11 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu18-calico-dual-stack.rb: -------------------------------------------------------------------------------- 1 | # For CI we are not worried about data persistence across reboot 2 | $libvirt_volume_cache = "unsafe" 3 | 4 | # Checking for box update can trigger API rate limiting 5 | # https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html 6 | $box_check_update = false 7 | $network_plugin = "calico" 8 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu20-flannel.rb: -------------------------------------------------------------------------------- 1 | $os = "ubuntu2004" 2 | 3 | # For CI we are not worried about data persistence across reboot 4 | $libvirt_volume_cache = "unsafe" 5 | 6 | # Checking for box update can trigger API rate limiting 7 | # https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html 8 | $box_check_update = false 9 | $vm_cpus = 2 -------------------------------------------------------------------------------- /contrib/azurerm/roles/generate-inventory_2/templates/loadbalancer_vars.j2: -------------------------------------------------------------------------------- 1 | ## External LB example config 2 | apiserver_loadbalancer_domain_name: {{ lb_pubip.dnsSettings.fqdn }} 3 | loadbalancer_apiserver: 4 | address: {{ lb_pubip.ipAddress }} 5 | port: 6443 6 | 7 | ## Internal loadbalancers for apiservers 8 | loadbalancer_apiserver_localhost: false 9 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2: -------------------------------------------------------------------------------- 1 | {% for host in groups['kube_node'] %} 2 | {% if hostvars[host]['access_ip'] is defined %} 3 | {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} 4 | {{ hostvars[host]['node_pod_cidr'] }} via {{
hostvars[host]['access_ip'] }} 5 | {% endif %} 6 | {% endif %} 7 | {% endfor %} 8 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd-host.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | After=network.target 4 | 5 | [Service] 6 | Type=notify 7 | User=root 8 | EnvironmentFile=/etc/etcd.env 9 | ExecStart={{ bin_dir }}/etcd 10 | NotifyAccess=all 11 | Restart=always 12 | RestartSec=10s 13 | LimitNOFILE=40000 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /tests/run-tests.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # curl -# -C - -o shebang-unit https://raw.github.com/arpinum-oss/shebang-unit/master/releases/shebang-unit 4 | # chmod +x shebang-unit 5 | 6 | now=$(date +"%Y%m%d%H%M%S") 7 | mkdir -p ${PWD}/tests-results 8 | ./shebang-unit --reporters=simple,junit --output-file=${PWD}/tests-results/junit_report-${now}.xml tests 9 | -------------------------------------------------------------------------------- /_upgrade_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ###ssh-agent bash 4 | #ssh-add ~/.ssh/id_rsa 5 | 6 | if [ -z "$1" ]; then 7 | echo "Usage: $0 adminname" 8 | exit 1 9 | fi 10 | 11 | d=$(date '+%Y.%m.%d_%H:%M') 12 | 13 | export ANSIBLE_LOG_PATH=./deploy-$d.log 14 | ansible-playbook -u "$1" -i inventory/s000000/inventory.ini upgrade-cluster.yml -b --diff 15 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-cloud-config-secret.yml.j2: -------------------------------------------------------------------------------- 1 | # This YAML file contains secret objects, 2 | # which are necessary to run csi cinder plugin. 3 | 4 | kind: Secret 5 | apiVersion: v1 6 | metadata: 7 | name: cloud-config 8 | namespace: kube-system 9 | data: 10 | cloud.conf: {{ cloud_config_secret.content }} 11 | -------------------------------------------------------------------------------- /roles/kubespray-defaults/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Kubespray constants 3 | 4 | kube_proxy_deployed: "{{ 'addon/kube-proxy' not in kubeadm_init_phases_skip }}" 5 | 6 | # The lowest version allowed to upgrade from (same as calico_version in the previous branch) 7 | calico_min_version_required: "v3.19.4" 8 | 9 | containerd_min_version_required: "1.3.7" 10 | -------------------------------------------------------------------------------- /tests/scripts/md-table/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | echo "Install requirements..." 5 | pip install -r ./tests/scripts/md-table/requirements.txt 6 | 7 | echo "Generate current file..." 8 | ./tests/scripts/md-table/main.py > tmp.md 9 | 10 | echo "Compare docs/ci.md with actual tests in tests/files/*.yml ..." 
11 | cmp docs/ci.md tmp.md -------------------------------------------------------------------------------- /roles/bootstrap-os/molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE'] 7 | ).get_hosts('all') 8 | 9 | 10 | def test_python(host): 11 | assert host.exists('python3') or host.exists('python') 12 | -------------------------------------------------------------------------------- /tests/files/packet_almalinux8-calico-ha-ebpf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: almalinux-8 4 | mode: ha 5 | vm_memory: 3072Mi 6 | 7 | # Kubespray settings 8 | calico_bpf_enabled: true 9 | kube_proxy_remove: true 10 | loadbalancer_apiserver_localhost: true 11 | use_localhost_as_kubeapi_loadbalancer: true 12 | auto_renew_certificates: true 13 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*.{yaml,yml,yml.j2,yaml.j2}] 4 | indent_style = space 5 | indent_size = 2 6 | trim_trailing_whitespace = true 7 | insert_final_newline = true 8 | charset = utf-8 9 | 10 | [{Dockerfile}] 11 | indent_style = space 12 | indent_size = 2 13 | trim_trailing_whitespace = true 14 | insert_final_newline = true 15 | charset = utf-8 16 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | kata_containers_enabled: true 7 | container_manager: containerd 8 | roles: 9 | - role: kubespray-defaults 10 | - role: container-engine/containerd 11 | - role: container-engine/kata-containers 12 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd-events-host.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | After=network.target 4 | 5 | [Service] 6 | Type=notify 7 | User=root 8 | EnvironmentFile=/etc/etcd-events.env 9 | ExecStart={{ bin_dir }}/etcd 10 | NotifyAccess=all 11 | Restart=always 12 | RestartSec=10s 13 | LimitNOFILE=40000 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/templates/000-cilium-portmap.conflist.j2: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.3.1", 3 | "name": "cilium-portmap", 4 | "plugins": [ 5 | { 6 | "type": "cilium-cni" 7 | }, 8 | { 9 | "type": "portmap", 10 | "capabilities": { "portMappings": true } 11 | } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /contrib/terraform/exoscale/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | exoscale = { 4 | source = "exoscale/exoscale" 5 | version = ">= 0.21" 6 | } 7 | null = { 8 | source = "hashicorp/null" 9 | } 10 | template = { 11 | source = "hashicorp/template" 12 | } 13 | } 14 | 
required_version = ">= 0.13" 15 | } 16 | -------------------------------------------------------------------------------- /contrib/terraform/hetzner/templates/inventory.tpl: -------------------------------------------------------------------------------- 1 | [all] 2 | ${connection_strings_master} 3 | ${connection_strings_worker} 4 | 5 | [kube-master] 6 | ${list_master} 7 | 8 | [etcd] 9 | ${list_master} 10 | 11 | [kube-node] 12 | ${list_worker} 13 | 14 | [k8s-cluster:children] 15 | kube-master 16 | kube-node 17 | 18 | [k8s-cluster:vars] 19 | network_id=${network_id} 20 | -------------------------------------------------------------------------------- /contrib/terraform/hetzner/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hcloud = { 4 | source = "hetznercloud/hcloud" 5 | version = "1.31.1" 6 | } 7 | null = { 8 | source = "hashicorp/null" 9 | } 10 | template = { 11 | source = "hashicorp/template" 12 | } 13 | } 14 | required_version = ">= 0.14" 15 | } 16 | -------------------------------------------------------------------------------- /contrib/terraform/vsphere/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vsphere = { 4 | source = "hashicorp/vsphere" 5 | version = ">= 1.24.3" 6 | } 7 | null = { 8 | source = "hashicorp/null" 9 | } 10 | template = { 11 | source = "hashicorp/template" 12 | } 13 | } 14 | required_version = ">= 0.13" 15 | } 16 | -------------------------------------------------------------------------------- /roles/container-engine/docker/tasks/docker_plugin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Docker plugin 3 | command: docker plugin install --grant-all-permissions {{ docker_plugin | quote }} 4 | when: docker_plugin is defined 5 | register: docker_plugin_status 6 | failed_when: 7 | - docker_plugin_status.failed 8 | - '"already exists" not in docker_plugin_status.stderr' 9 | -------------------------------------------------------------------------------- /roles/network_plugin/multus/files/multus-clusterrolebinding.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: multus 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: multus 10 | subjects: 11 | - kind: ServiceAccount 12 | name: multus 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /roles/network_plugin/weave/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Weave | Create manifest 3 | template: 4 | src: weave-net.yml.j2 5 | dest: "{{ kube_config_dir }}/weave-net.yml" 6 | mode: 0644 7 | 8 | - name: Weave | Fix nodePort for Weave 9 | template: 10 | src: 10-weave.conflist.j2 11 | dest: /etc/cni/net.d/10-weave.conflist 12 | mode: 0644 13 | -------------------------------------------------------------------------------- /contrib/network-storage/glusterfs/roles/glusterfs/client/tasks/setup-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Prerequisites 3 | package: name={{ item }} state=present 4 | with_items: 5 | - "centos-release-gluster{{ glusterfs_default_release }}" 6 | 7 | - name: Install 
Packages 8 | package: name={{ item }} state=present 9 | with_items: 10 | - glusterfs-client 11 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/templates/calico-crb.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: calico-node 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: calico-node 10 | subjects: 11 | - kind: ServiceAccount 12 | name: calico-node 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /contrib/dind/test-some_distros-most_CNIs.env: -------------------------------------------------------------------------------- 1 | DISTROS=(debian centos) 2 | EXTRAS=( 3 | 'kube_network_plugin=calico {"kubeadm_enabled":true}' 4 | 'kube_network_plugin=canal {"kubeadm_enabled":true}' 5 | 'kube_network_plugin=cilium {"kubeadm_enabled":true}' 6 | 'kube_network_plugin=flannel {"kubeadm_enabled":true}' 7 | 'kube_network_plugin=weave {"kubeadm_enabled":true}' 8 | ) 9 | -------------------------------------------------------------------------------- /roles/adduser/vars/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | addusers: 3 | - name: etcd 4 | comment: "Etcd user" 5 | createhome: yes 6 | home: "{{ etcd_data_dir }}" 7 | system: yes 8 | shell: /sbin/nologin 9 | 10 | - name: kube 11 | comment: "Kubernetes user" 12 | createhome: no 13 | system: yes 14 | shell: /sbin/nologin 15 | group: "{{ kube_cert_group }}" 16 | -------------------------------------------------------------------------------- /roles/adduser/vars/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | addusers: 3 | - name: etcd 4 | comment: "Etcd user" 5 | createhome: yes 6 | home: "{{ etcd_data_dir }}" 7 | system: yes 8 | shell: /sbin/nologin 9 | 10 | - name: kube 11 | comment: "Kubernetes user" 12 | createhome: no 13 | system: yes 14 | shell: /sbin/nologin 15 | group: "{{ kube_cert_group }}" 16 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart crio 3 | command: /bin/true 4 | notify: 5 | - CRI-O | reload systemd 6 | - CRI-O | reload crio 7 | 8 | - name: CRI-O | reload systemd 9 | systemd: 10 | daemon_reload: true 11 | 12 | - name: CRI-O | reload crio 13 | service: 14 | name: crio 15 | state: restarted 16 | enabled: yes 17 | -------------------------------------------------------------------------------- /roles/container-engine/crictl/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get crictl completion 3 | command: "{{ bin_dir }}/crictl completion" 4 | changed_when: False 5 | register: cri_completion 6 | check_mode: false 7 | 8 | - name: Install crictl completion 9 | copy: 10 | dest: /etc/bash_completion.d/crictl 11 | content: "{{ cri_completion.stdout }}" 12 | mode: 0644 13 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/aws-ebs-csi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # To restrict which AZ the volume should be 
provisioned in 3 | # set this value to true and set the list of relevant AZs 4 | # For it to work, the flag aws_ebs_csi_enable_volume_scheduling 5 | # in AWS EBS Driver must be true 6 | restrict_az_provisioning: false 7 | aws_ebs_availability_zones: 8 | - eu-west-3c 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/registry/templates/registry-cr.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: psp:registry 6 | namespace: {{ registry_namespace }} 7 | rules: 8 | - apiGroups: 9 | - policy 10 | resourceNames: 11 | - registry 12 | resources: 13 | - podsecuritypolicies 14 | verbs: 15 | - use 16 | -------------------------------------------------------------------------------- /roles/kubernetes/node/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Node | restart kubelet 3 | command: /bin/true 4 | notify: 5 | - Kubelet | reload systemd 6 | - Kubelet | restart kubelet 7 | 8 | - name: Kubelet | reload systemd 9 | systemd: 10 | daemon_reload: true 11 | 12 | - name: Kubelet | restart kubelet 13 | service: 14 | name: kubelet 15 | state: restarted 16 | -------------------------------------------------------------------------------- /roles/container-engine/docker/templates/rh_docker.repo.j2: -------------------------------------------------------------------------------- 1 | [docker-ce] 2 | name=Docker-CE Repository 3 | baseurl={{ docker_rh_repo_base_url }} 4 | enabled=0 5 | gpgcheck={{ '1' if docker_rh_repo_gpgkey else '0' }} 6 | keepcache={{ docker_rpm_keepcache | default('1') }} 7 | gpgkey={{ docker_rh_repo_gpgkey }} 8 | {% if http_proxy is defined %} 9 | proxy={{ http_proxy }} 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /roles/kubernetes/kubeadm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Kubeadm | restart kubelet 3 | command: /bin/true 4 | notify: 5 | - Kubeadm | reload systemd 6 | - Kubeadm | reload kubelet 7 | 8 | - name: Kubeadm | reload systemd 9 | systemd: 10 | daemon_reload: true 11 | 12 | - name: Kubeadm | reload kubelet 13 | service: 14 | name: kubelet 15 | state: restarted 16 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/vars/centos-7.yml: -------------------------------------------------------------------------------- 1 | --- 2 | crio_versioned_pkg: 3 | "1.24": 4 | - "cri-o-1.24.*" 5 | "1.23": 6 | - "cri-o-1.23.*" 7 | "1.22": 8 | - "cri-o-1.22.*" 9 | "1.21": 10 | - "cri-o-1.21.*" 11 | 12 | default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}" 13 | 14 | crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}" 15 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/vars/centos-8.yml: -------------------------------------------------------------------------------- 1 | --- 2 | crio_versioned_pkg: 3 | "1.24": 4 | - "cri-o-1.24.*" 5 | "1.23": 6 | - "cri-o-1.23.*" 7 | "1.22": 8 | - "cri-o-1.22.*" 9 | "1.21": 10 | - "cri-o-1.21.*" 11 | 12 | default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}" 13 | 14 | crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}" 15 | 
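The centos-7/centos-8 CRI-O vars above resolve crio_packages through the crio_versioned_pkg map but deliberately leave centos_crio_packages as an override hook via default(). A sketch of pinning an exact package spec from group_vars, assuming that precedence; the version string is illustrative:

# group_vars override (hypothetical)
centos_crio_packages:
  - "cri-o-1.23.3"   # bypasses the wildcard chosen by crio_versioned_pkg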
-------------------------------------------------------------------------------- /roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config.j2: -------------------------------------------------------------------------------- 1 | [Global] 2 | port = "{{ external_vsphere_vcenter_port }}" 3 | insecure-flag = "{{ external_vsphere_insecure }}" 4 | secret-name = "cpi-global-secret" 5 | secret-namespace = "kube-system" 6 | 7 | [VirtualCenter "{{ external_vsphere_vcenter_ip }}"] 8 | datacenters = "{{ external_vsphere_datacenter }}" 9 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/templates/cilium-operator/crb.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: cilium-operator 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: cilium-operator 10 | subjects: 11 | - kind: ServiceAccount 12 | name: cilium-operator 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /tests/files/vagrant_ubuntu16-kube-router-svc-proxy.rb: -------------------------------------------------------------------------------- 1 | $os = "ubuntu1604" 2 | 3 | # For CI we are not worried about data persistence across reboot 4 | $libvirt_volume_cache = "unsafe" 5 | 6 | # Checking for box update can trigger API rate limiting 7 | # https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html 8 | $box_check_update = false 9 | 10 | $network_plugin = "kube-router" 11 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | ignore: | 5 | .git/ 6 | 7 | rules: 8 | braces: 9 | min-spaces-inside: 0 10 | max-spaces-inside: 1 11 | brackets: 12 | min-spaces-inside: 0 13 | max-spaces-inside: 1 14 | indentation: 15 | spaces: 2 16 | indent-sequences: consistent 17 | line-length: disable 18 | new-line-at-end-of-file: disable 19 | truthy: disable 20 | -------------------------------------------------------------------------------- /contrib/azurerm/clear-rg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | AZURE_RESOURCE_GROUP="$1" 6 | 7 | if [ "$AZURE_RESOURCE_GROUP" == "" ]; then 8 | echo "AZURE_RESOURCE_GROUP is missing" 9 | exit 1 10 | fi 11 | 12 | ansible-playbook generate-templates.yml 13 | 14 | az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete 15 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/templates/secrets_encryption.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | {{ kube_encryption_resources|to_nice_yaml|indent(4, True) }} 6 | providers: 7 | - {{ kube_encryption_algorithm }}: 8 | keys: 9 | - name: key 10 | secret: {{ kube_encrypt_token | b64encode }} 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /contrib/network-storage/glusterfs/roles/glusterfs/server/tasks/setup-RedHat.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Prerequisites 3 | package: name={{ item }} state=present 4 | with_items: 5 | - "centos-release-gluster{{ glusterfs_default_release }}" 6 | 7 | - name: Install Packages 8 | package: name={{ item }} state=present 9 | with_items: 10 | - glusterfs-server 11 | - glusterfs-client 12 | -------------------------------------------------------------------------------- /roles/container-engine/gvisor/molecule/default/files/10-mynet.conf: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.2.0", 3 | "name": "mynet", 4 | "type": "bridge", 5 | "bridge": "cni0", 6 | "isGateway": true, 7 | "ipMasq": true, 8 | "ipam": { 9 | "type": "host-local", 10 | "subnet": "172.19.0.0/24", 11 | "routes": [ 12 | { 13 | "dst": "0.0.0.0/0" 14 | } 15 | ] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /roles/container-engine/nerdctl/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get nerdctl completion 3 | command: "{{ bin_dir }}/nerdctl completion bash" 4 | changed_when: False 5 | register: nerdctl_completion 6 | check_mode: false 7 | 8 | - name: Install nerdctl completion 9 | copy: 10 | dest: /etc/bash_completion.d/nerdctl 11 | content: "{{ nerdctl_completion.stdout }}" 12 | mode: 0644 13 | -------------------------------------------------------------------------------- /roles/container-engine/youki/molecule/default/files/10-mynet.conf: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.4.0", 3 | "name": "mynet", 4 | "type": "bridge", 5 | "bridge": "cni0", 6 | "isGateway": true, 7 | "ipMasq": true, 8 | "ipam": { 9 | "type": "host-local", 10 | "subnet": "172.19.0.0/24", 11 | "routes": [ 12 | { 13 | "dst": "0.0.0.0/0" 14 | } 15 | ] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /roles/etcd/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: adduser 4 | user: "{{ addusers.etcd }}" 5 | when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) 6 | - role: adduser 7 | user: "{{ addusers.kube }}" 8 | when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos) 9 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: local-volume-provisioner-node-clusterrole 6 | namespace: {{ local_volume_provisioner_namespace }} 7 | rules: 8 | - apiGroups: [""] 9 | resources: ["nodes"] 10 | verbs: ["get"] 11 | -------------------------------------------------------------------------------- /roles/kubernetes/secrets/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set secret_changed 3 | command: /bin/true 4 | notify: 5 | - set secret_changed to true 6 | - clear kubeconfig for root user 7 | 8 | - name: set secret_changed to true 9 | set_fact: 10 | secret_changed: true 
11 | 12 | - name: clear kubeconfig for root user 13 | file: 14 | path: /root/.kube/config 15 | state: absent 16 | -------------------------------------------------------------------------------- /roles/container-engine/cri-dockerd/molecule/default/files/10-mynet.conf: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.2.0", 3 | "name": "mynet", 4 | "type": "bridge", 5 | "bridge": "cni0", 6 | "isGateway": true, 7 | "ipMasq": true, 8 | "ipam": { 9 | "type": "host-local", 10 | "subnet": "172.19.0.0/24", 11 | "routes": [ 12 | { 13 | "dst": "0.0.0.0/0" 14 | } 15 | ] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-server-svc.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: netchecker-service 5 | namespace: {{ netcheck_namespace }} 6 | spec: 7 | selector: 8 | app: netchecker-server 9 | ports: 10 | - 11 | protocol: TCP 12 | port: 8081 13 | targetPort: 8081 14 | nodePort: {{ netchecker_port }} 15 | type: NodePort 16 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/molecule/default/files/10-mynet.conf: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.2.0", 3 | "name": "mynet", 4 | "type": "bridge", 5 | "bridge": "cni0", 6 | "isGateway": true, 7 | "ipMasq": true, 8 | "ipam": { 9 | "type": "host-local", 10 | "subnet": "172.19.0.0/24", 11 | "routes": [ 12 | { 13 | "dst": "0.0.0.0/0" 14 | } 15 | ] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /contrib/network-storage/glusterfs/roles/glusterfs/client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For Ubuntu. 3 | glusterfs_default_release: "" 4 | glusterfs_ppa_use: yes 5 | glusterfs_ppa_version: "4.1" 6 | 7 | # Gluster configuration. 
8 | gluster_mount_dir: /mnt/gluster 9 | gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster 10 | gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick" 11 | gluster_brick_name: gluster 12 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/etcd_metrics-service.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: etcd-metrics 5 | namespace: kube-system 6 | labels: 7 | {{ etcd_metrics_service_labels | to_yaml(indent=2, width=1337) | indent(width=4) }} 8 | spec: 9 | ports: 10 | - name: http-metrics 11 | protocol: TCP 12 | port: {{ etcd_metrics_port }} 13 | # targetPort: 14 | -------------------------------------------------------------------------------- /contrib/terraform/vsphere/modules/kubernetes-cluster/templates/metadata.tpl: -------------------------------------------------------------------------------- 1 | instance-id: ${hostname} 2 | local-hostname: ${hostname} 3 | network: 4 | version: 2 5 | ethernets: 6 | ${interface_name}: 7 | match: 8 | name: ${interface_name} 9 | dhcp4: false 10 | addresses: 11 | - ${ip}/${netmask} 12 | gateway4: ${gw} 13 | nameservers: 14 | addresses: [${dns}] 15 | -------------------------------------------------------------------------------- /inventory/prod/group_vars/k8s_cluster/k8s-net-canal.yml: -------------------------------------------------------------------------------- 1 | # see roles/network_plugin/canal/defaults/main.yml 2 | 3 | # The interface used by canal for host <-> host communication. 4 | # If left blank, then the interface is chosen using the node's 5 | # default route. 6 | # canal_iface: "" 7 | 8 | # Whether or not to masquerade traffic to destinations not within 9 | # the pod network. 10 | # canal_masquerade: "true" 11 | -------------------------------------------------------------------------------- /inventory/s000/group_vars/k8s_cluster/k8s-net-canal.yml: -------------------------------------------------------------------------------- 1 | # see roles/network_plugin/canal/defaults/main.yml 2 | 3 | # The interface used by canal for host <-> host communication. 4 | # If left blank, then the interface is chosen using the node's 5 | # default route. 6 | # canal_iface: "" 7 | 8 | # Whether or not to masquerade traffic to destinations not within 9 | # the pod network. 10 | # canal_masquerade: "true" 11 | -------------------------------------------------------------------------------- /inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml: -------------------------------------------------------------------------------- 1 | # see roles/network_plugin/canal/defaults/main.yml 2 | 3 | # The interface used by canal for host <-> host communication. 4 | # If left blank, then the interface is chosen using the node's 5 | # default route. 6 | # canal_iface: "" 7 | 8 | # Whether or not to masquerade traffic to destinations not within 9 | # the pod network.
10 | # canal_masquerade: "true" 11 | -------------------------------------------------------------------------------- /roles/container-engine/kata-containers/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kata_containers_dir: /opt/kata 3 | kata_containers_config_dir: /etc/kata-containers 4 | kata_containers_containerd_bin_dir: /usr/local/bin 5 | 6 | kata_containers_qemu_default_memory: "{{ ansible_memtotal_mb }}" 7 | kata_containers_qemu_debug: 'false' 8 | kata_containers_qemu_sandbox_cgroup_only: 'true' 9 | kata_containers_qemu_enable_mem_prealloc: 'false' 10 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/registry/templates/registry-crb.yml.j2: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: psp:registry 5 | namespace: {{ registry_namespace }} 6 | subjects: 7 | - kind: ServiceAccount 8 | name: registry 9 | namespace: {{ registry_namespace }} 10 | roleRef: 11 | kind: ClusterRole 12 | name: psp:registry 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/snapshots/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubernetes-apps/snapshots/snapshot-controller 4 | when: 5 | - cinder_csi_enabled or csi_snapshot_controller_enabled 6 | tags: 7 | - snapshot-controller 8 | 9 | - role: kubernetes-apps/snapshots/cinder-csi 10 | when: 11 | - cinder_csi_enabled 12 | tags: 13 | - snapshot 14 | - cinder-csi-driver 15 | -------------------------------------------------------------------------------- /contrib/misc/clusteradmin-rbac.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: kubernetes-dashboard 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: cluster-admin 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kubernetes-dashboard 15 | namespace: kube-system 16 | -------------------------------------------------------------------------------- /contrib/terraform/exoscale/templates/inventory.tpl: -------------------------------------------------------------------------------- 1 | [all] 2 | ${connection_strings_master} 3 | ${connection_strings_worker} 4 | 5 | [kube_control_plane] 6 | ${list_master} 7 | 8 | [kube_control_plane:vars] 9 | supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ] 10 | 11 | [etcd] 12 | ${list_master} 13 | 14 | [kube_node] 15 | ${list_worker} 16 | 17 | [k8s_cluster:children] 18 | kube_control_plane 19 | kube_node 20 | -------------------------------------------------------------------------------- /roles/container-engine/crun/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: crun | Download crun binary 3 | include_tasks: "../../../download/tasks/download_file.yml" 4 | vars: 5 | download: "{{ download_defaults | combine(downloads.crun) }}" 6 | 7 | - name: Copy crun binary from download dir 8 | copy: 9 | src: "{{ local_release_dir }}/crun" 10 | dest: "{{ crun_bin_dir }}/crun" 11 | mode: 0755 12 | remote_src: true 13 | 
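The three k8s-net-canal.yml inventory files above (prod, s000, sample) ship with every option commented out, so canal falls back to the role defaults. A sketch of what an activated override could look like; both variable names come straight from the commented defaults, while the interface name eth1 is a placeholder:

```yaml
# inventory/<env>/group_vars/k8s_cluster/k8s-net-canal.yml (values illustrative)
# Pin canal's host <-> host traffic to a specific NIC instead of the
# interface behind the node's default route.
canal_iface: "eth1"
# Keep masquerading traffic destined outside the pod network.
canal_masquerade: "true"
```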
-------------------------------------------------------------------------------- /roles/kubernetes-apps/container_runtimes/kata_containers/templates/runtimeclass-kata-qemu.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: RuntimeClass 3 | apiVersion: node.k8s.io/v1 4 | metadata: 5 | name: kata-qemu 6 | handler: kata-qemu 7 | {% if kata_containers_qemu_overhead %} 8 | overhead: 9 | podFixed: 10 | cpu: {{ kata_containers_qemu_overhead_fixed_cpu }} 11 | memory: {{ kata_containers_qemu_overhead_fixed_memory }} 12 | {% endif %} 13 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/metrics_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | metrics_server_container_port: 4443 3 | metrics_server_kubelet_insecure_tls: true 4 | metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" 5 | metrics_server_metric_resolution: 15s 6 | metrics_server_limits_cpu: 100m 7 | metrics_server_limits_memory: 200Mi 8 | metrics_server_requests_cpu: 100m 9 | metrics_server_requests_memory: 200Mi 10 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/network_plugin/kube-ovn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Kube-OVN | Start Resources 3 | kube: 4 | name: "{{ item.item.name }}" 5 | kubectl: "{{ bin_dir }}/kubectl" 6 | filename: "{{ kube_config_dir }}/{{ item.item.file }}" 7 | state: "latest" 8 | with_items: "{{ kube_ovn_node_manifests.results }}" 9 | when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped 10 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/templates/calico-apiserver-ns.yml.j2: -------------------------------------------------------------------------------- 1 | # This is a tech-preview manifest which installs the Calico API server. Note that this manifest is liable to change 2 | # or be removed in future releases without further warning. 3 | # 4 | # Namespace and namespace-scoped resources. 
5 | apiVersion: v1 6 | kind: Namespace 7 | metadata: 8 | labels: 9 | name: calico-apiserver 10 | name: calico-apiserver 11 | -------------------------------------------------------------------------------- /roles/network_plugin/cilium/tasks/reset_iface.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "reset | check if network device {{ iface }} is present" 3 | stat: 4 | path: "/sys/class/net/{{ iface }}" 5 | get_attributes: no 6 | get_checksum: no 7 | get_mime: no 8 | register: device_remains 9 | 10 | - name: "reset | remove network device {{ iface }}" 11 | command: "ip link del {{ iface }}" 12 | when: device_remains.stat.exists 13 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu20-calico-aio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-2004 4 | mode: aio 5 | vm_memory: 1600Mi 6 | 7 | # Kubespray settings 8 | auto_renew_certificates: true 9 | 10 | # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko 11 | kube_proxy_mode: iptables 12 | enable_nodelocaldns: False 13 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu22-calico-aio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-2204 4 | mode: aio 5 | vm_memory: 1600Mi 6 | 7 | # Kubespray settings 8 | auto_renew_certificates: true 9 | 10 | # Currently ipvs not available on KVM: https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko 11 | kube_proxy_mode: iptables 12 | enable_nodelocaldns: False 13 | -------------------------------------------------------------------------------- /contrib/terraform/metal/output.tf: -------------------------------------------------------------------------------- 1 | output "k8s_masters" { 2 | value = metal_device.k8s_master.*.access_public_ipv4 3 | } 4 | 5 | output "k8s_masters_no_etc" { 6 | value = metal_device.k8s_master_no_etcd.*.access_public_ipv4 7 | } 8 | 9 | output "k8s_etcds" { 10 | value = metal_device.k8s_etcd.*.access_public_ipv4 11 | } 12 | 13 | output "k8s_nodes" { 14 | value = metal_device.k8s_node.*.access_public_ipv4 15 | } 16 | 17 | -------------------------------------------------------------------------------- /roles/bootstrap-os/tasks/bootstrap-clearlinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ClearLinux ships with Python installed 3 | 4 | - name: Install basic package to run containers 5 | package: 6 | name: containers-basic 7 | state: present 8 | 9 | - name: Make sure docker service is enabled 10 | systemd: 11 | name: docker 12 | masked: false 13 | enabled: true 14 | daemon_reload: true 15 | state: started 16 | become: true 17 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrole.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: psp:netchecker-agent-hostnet 5 | namespace: {{ netcheck_namespace }} 6 | rules: 7 | - apiGroups: 8 | - policy 9 | resourceNames: 10 | - 
netchecker-agent-hostnet 11 | resources: 12 | - podsecuritypolicies 13 | verbs: 14 | - use 15 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: calico-kube-controllers 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: calico-kube-controllers 10 | subjects: 11 | - kind: ServiceAccount 12 | name: calico-kube-controllers 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/templates/admission-controls.v1beta2.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apiserver.config.k8s.io/v1 2 | kind: AdmissionConfiguration 3 | plugins: 4 | {% for plugin in kube_apiserver_enable_admission_plugins[0].split(',') %} 5 | {% if plugin in kube_apiserver_admission_plugins_needs_configuration %} 6 | - name: {{ plugin }} 7 | path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml 8 | {% endif %} 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /roles/network_plugin/canal/templates/canal-crb-calico.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | # Bind the calico ClusterRole to the canal ServiceAccount. 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: canal-calico 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: calico 11 | subjects: 12 | - kind: ServiceAccount 13 | name: canal 14 | namespace: kube-system 15 | -------------------------------------------------------------------------------- /contrib/network-storage/heketi/roles/provision/templates/storageclass.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: gluster 6 | annotations: 7 | storageclass.beta.kubernetes.io/is-default-class: "true" 8 | provisioner: kubernetes.io/glusterfs 9 | parameters: 10 | resturl: "http://{{ endpoint_address }}:8080" 11 | restuser: "admin" 12 | restuserkey: "{{ heketi_admin_key }}" 13 | -------------------------------------------------------------------------------- /roles/download/tasks/extract_file.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: extract_file | Unpacking archive 3 | unarchive: 4 | src: "{{ download.dest }}" 5 | dest: "{{ download.dest | dirname }}" 6 | owner: "{{ download.owner | default(omit) }}" 7 | mode: "{{ download.mode | default(omit) }}" 8 | copy: no 9 | extra_opts: "{{ download.unarchive_extra_opts|default(omit) }}" 10 | when: 11 | - download.unarchive | default(false) 12 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/cinder/tasks/cinder-write-cacert.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # include to workaround mitogen issue 3 | # https://github.com/dw/mitogen/issues/663 4 | 5 | - name: Cinder CSI Driver | Write cacert file 6 | copy: 7 | src: "{{ cinder_cacert }}" 8 | dest: "{{ kube_config_dir }}/cinder-cacert.pem" 9 | group: "{{ 
kube_cert_group }}" 10 | mode: 0640 11 | delegate_to: "{{ delegate_host_to_write_cacert }}" 12 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-secret.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: "{{ external_hcloud_cloud.token_secret_name }}" 6 | namespace: kube-system 7 | data: 8 | token: "{{ external_hcloud_cloud.hcloud_api_token | b64encode }}" 9 | {% if external_hcloud_cloud.with_networks %} 10 | network: "{{ network_id|b64encode }}" 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /roles/kubernetes/control-plane/templates/eventratelimit.v1beta2.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: eventratelimit.admission.k8s.io/v1alpha1 2 | kind: Configuration 3 | limits: 4 | {% for limit in kube_apiserver_admission_event_rate_limits.values() %} 5 | - type: {{ limit.type }} 6 | qps: {{ limit.qps }} 7 | burst: {{ limit.burst }} 8 | {% if limit.cache_size is defined %} 9 | cacheSize: {{ limit.cache_size }} 10 | {% endif %} 11 | {% endfor %} 12 | -------------------------------------------------------------------------------- /roles/network_plugin/calico/rr/tasks/pre.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Calico-rr | Disable calico-rr service if it exists 3 | service: 4 | name: calico-rr 5 | state: stopped 6 | enabled: no 7 | failed_when: false 8 | 9 | - name: Calico-rr | Delete obsolete files 10 | file: 11 | path: "{{ item }}" 12 | state: absent 13 | with_items: 14 | - /etc/calico/calico-rr.env 15 | - /etc/systemd/system/calico-rr.service 16 | -------------------------------------------------------------------------------- /roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | # Bind the flannel ClusterRole to the canal ServiceAccount. 
3 | kind: ClusterRoleBinding 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: canal-flannel 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: flannel 11 | subjects: 12 | - kind: ServiceAccount 13 | name: canal 14 | namespace: kube-system 15 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/centos-network-macvlan.cfg.j2: -------------------------------------------------------------------------------- 1 | DEVICE=mac0 2 | DEVICETYPE=macvlan 3 | TYPE=macvlan 4 | BOOTPROTO=none 5 | ONBOOT=yes 6 | NM_CONTROLLED=no 7 | 8 | MACVLAN_PARENT={{ macvlan_interface }} 9 | MACVLAN_MODE=bridge 10 | 11 | IPADDR={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }} 12 | NETMASK={{ node_pod_cidr|ipaddr('netmask') }} 13 | NETWORK={{ node_pod_cidr|ipaddr('network') }} 14 | 15 | -------------------------------------------------------------------------------- /contrib/azurerm/roles/generate-inventory/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Query Azure VMs # noqa 301 4 | command: azure vm list-ip-address --json {{ azure_resource_group }} 5 | register: vm_list_cmd 6 | 7 | - name: Set vm_list 8 | set_fact: 9 | vm_list: "{{ vm_list_cmd.stdout }}" 10 | 11 | - name: Generate inventory 12 | template: 13 | src: inventory.j2 14 | dest: "{{ playbook_dir }}/inventory" 15 | mode: 0644 16 | -------------------------------------------------------------------------------- /contrib/terraform/gcp/output.tf: -------------------------------------------------------------------------------- 1 | output "master_ips" { 2 | value = module.kubernetes.master_ip_addresses 3 | } 4 | 5 | output "worker_ips" { 6 | value = module.kubernetes.worker_ip_addresses 7 | } 8 | 9 | output "ingress_controller_lb_ip_address" { 10 | value = module.kubernetes.ingress_controller_lb_ip_address 11 | } 12 | 13 | output "control_plane_lb_ip_address" { 14 | value = module.kubernetes.control_plane_lb_ip_address 15 | } 16 | -------------------------------------------------------------------------------- /contrib/terraform/openstack/modules/network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "external_net" {} 2 | 3 | variable "network_name" {} 4 | 5 | variable "network_dns_domain" {} 6 | 7 | variable "cluster_name" {} 8 | 9 | variable "dns_nameservers" { 10 | type = list 11 | } 12 | 13 | variable "port_security_enabled" { 14 | type = bool 15 | } 16 | 17 | variable "subnet_cidr" {} 18 | 19 | variable "use_neutron" {} 20 | 21 | variable "router_id" {} 22 | -------------------------------------------------------------------------------- /contrib/terraform/vsphere/modules/kubernetes-cluster/output.tf: -------------------------------------------------------------------------------- 1 | output "master_ip" { 2 | value = { 3 | for name, machine in var.machines : 4 | "${var.prefix}-${name}" => machine.ip 5 | if machine.node_type == "master" 6 | } 7 | } 8 | 9 | output "worker_ip" { 10 | value = { 11 | for name, machine in var.machines : 12 | "${var.prefix}-${name}" => machine.ip 13 | if machine.node_type == "worker" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/vars/amazon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | crio_storage_driver: "overlay" 4 | 5 | 
crio_versioned_pkg: 6 | "1.24": 7 | - "cri-o-1.24.*" 8 | "1.23": 9 | - "cri-o-1.23.*" 10 | "1.22": 11 | - "cri-o-1.22.*" 12 | "1.21": 13 | - "cri-o-1.21.*" 14 | 15 | default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}" 16 | 17 | crio_packages: "{{ centos_crio_packages | default(default_crio_packages) }}" 18 | -------------------------------------------------------------------------------- /tests/files/packet_debian10-calico-upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: debian-10 4 | mode: default 5 | 6 | # Docker specific settings: 7 | container_manager: docker 8 | etcd_deployment_type: docker 9 | 10 | # Pin disabling ipip mode to ensure proper upgrade 11 | ipip: false 12 | calico_vxlan_mode: Always 13 | calico_network_backend: bird 14 | 15 | # Needed to bypass deprecation check 16 | ignore_assert_errors: true 17 | -------------------------------------------------------------------------------- /contrib/terraform/exoscale/output.tf: -------------------------------------------------------------------------------- 1 | output "master_ips" { 2 | value = module.kubernetes.master_ip_addresses 3 | } 4 | 5 | output "worker_ips" { 6 | value = module.kubernetes.worker_ip_addresses 7 | } 8 | 9 | output "ingress_controller_lb_ip_address" { 10 | value = module.kubernetes.ingress_controller_lb_ip_address 11 | } 12 | 13 | output "control_plane_lb_ip_address" { 14 | value = module.kubernetes.control_plane_lb_ip_address 15 | } 16 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/metrics_server/templates/resource-reader.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: system:metrics-server 5 | labels: 6 | addonmanager.kubernetes.io/mode: Reconcile 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - pods 12 | - nodes 13 | - nodes/metrics 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | -------------------------------------------------------------------------------- /roles/network_plugin/cni/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: CNI | make sure /opt/cni/bin exists 3 | file: 4 | path: /opt/cni/bin 5 | state: directory 6 | mode: 0755 7 | owner: kube 8 | recurse: true 9 | 10 | - name: CNI | Copy cni plugins 11 | unarchive: 12 | src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" 13 | dest: "/opt/cni/bin" 14 | mode: 0755 15 | remote_src: yes 16 | -------------------------------------------------------------------------------- /contrib/dind/hosts: -------------------------------------------------------------------------------- 1 | [local] 2 | # If you created a virtualenv for ansible, you may need to specify running the 3 | # python binary from there instead: 4 | #localhost ansible_connection=local ansible_python_interpreter=/home/user/kubespray/.venv/bin/python 5 | localhost ansible_connection=local 6 | 7 | [containers] 8 | kube-node1 9 | kube-node2 10 | kube-node3 11 | kube-node4 12 | kube-node5 13 | 14 | [containers:vars] 15 | ansible_connection=docker 16 | -------------------------------------------------------------------------------- /docs/opensuse.md: -------------------------------------------------------------------------------- 1 | # openSUSE Leap 15.3 and Tumbleweed 2 | 3 | openSUSE Leap 
installation notes: 4 | 5 | - Install Ansible 6 | 7 | ```ShellSession 8 | sudo zypper ref 9 | sudo zypper -n install ansible 10 | 11 | ``` 12 | 13 | - Install Jinja2 and Python-Netaddr 14 | 15 | ```sudo zypper -n install python-Jinja2 python-netaddr``` 16 | 17 | Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment) 18 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | {% if crio_registry_auth is defined and crio_registry_auth|length %} 2 | { 3 | {% for reg in crio_registry_auth %} 4 | "auths": { 5 | "{{ reg.registry }}": { 6 | "auth": "{{ (reg.username + ':' + reg.password) | string | b64encode }}" 7 | } 8 | {% if not loop.last %} 9 | }, 10 | {% else %} 11 | } 12 | {% endif %} 13 | {% endfor %} 14 | } 15 | {% else %} 16 | {} 17 | {% endif %} 18 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-cloud-config.j2: -------------------------------------------------------------------------------- 1 | [Global] 2 | cluster-id = "{{ external_vsphere_kubernetes_cluster_id }}" 3 | 4 | [VirtualCenter "{{ external_vsphere_vcenter_ip }}"] 5 | insecure-flag = "{{ external_vsphere_insecure }}" 6 | user = "{{ external_vsphere_user }}" 7 | password = "{{ external_vsphere_password }}" 8 | port = "{{ external_vsphere_vcenter_port }}" 9 | datacenters = "{{ external_vsphere_datacenter }}" 10 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-ingress-nginx.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: ingress-nginx 6 | namespace: {{ ingress_nginx_namespace }} 7 | labels: 8 | app.kubernetes.io/name: ingress-nginx 9 | app.kubernetes.io/part-of: ingress-nginx 10 | {% if ingress_nginx_configmap %} 11 | data: 12 | {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }} 13 | {%- endif %} 14 | -------------------------------------------------------------------------------- /roles/kubernetes/tokens/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - import_tasks: check-tokens.yml 4 | tags: 5 | - k8s-secrets 6 | - k8s-gen-tokens 7 | - facts 8 | 9 | - name: Make sure the tokens directory exists 10 | file: 11 | path: "{{ kube_token_dir }}" 12 | state: directory 13 | mode: o-rwx 14 | group: "{{ kube_cert_group }}" 15 | 16 | - import_tasks: gen_tokens.yml 17 | tags: 18 | - k8s-secrets 19 | - k8s-gen-tokens 20 | -------------------------------------------------------------------------------- /roles/network_plugin/canal/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reset_canal_cni 3 | command: /bin/true 4 | notify: 5 | - delete 10-canal.conflist 6 | - delete canal-node containers 7 | 8 | - name: delete 10-canal.conflist 9 | file: 10 | path: /etc/canal/10-canal.conflist 11 | state: absent 12 | 13 | - name: delete canal-node containers 14 | shell: "docker ps -af name=k8s_POD_canal-node* -q | xargs --no-run-if-empty docker rm -f" 15 | -------------------------------------------------------------------------------- /tests/ansible.cfg:
-------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | pipelining=True 3 | ansible_ssh_common_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 4 | retries=2 5 | [defaults] 6 | forks = 20 7 | host_key_checking=False 8 | gathering = smart 9 | fact_caching = jsonfile 10 | fact_caching_connection = /tmp 11 | stdout_callback = skippy 12 | library = ./library:../library 13 | callbacks_enabled = profile_tasks 14 | jinja2_extensions = jinja2.ext.do 15 | -------------------------------------------------------------------------------- /_deploy_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ###ssh-agent bash 4 | #ssh-add ~/.ssh/id_rsa 5 | 6 | if [ -z "$1" ]; then 7 | echo "Usage: $0 adminname" 8 | exit 1 9 | fi 10 | 11 | d=$(date '+%Y.%m.%d_%H:%M') 12 | 13 | # If SSH access to the servers is password-only, then 14 | # install sshpass on all nodes and add the `-k` option 15 | 16 | export ANSIBLE_LOG_PATH=./deploy-$d.log 17 | ansible-playbook -u "$1" -i inventory/s000000/inventory.ini cluster.yml -b --diff 18 | -------------------------------------------------------------------------------- /roles/container-engine/cri-o/templates/unqualified.conf.j2: -------------------------------------------------------------------------------- 1 | {%- set _unqualified_registries = [] -%} 2 | {% for _registry in crio_registries if _registry.unqualified -%} 3 | {% if _registry.prefix is defined -%} 4 | {{ _unqualified_registries.append(_registry.prefix) }} 5 | {% else %} 6 | {{ _unqualified_registries.append(_registry.location) }} 7 | {%- endif %} 8 | {%- endfor %} 9 | 10 | unqualified-search-registries = {{ _unqualified_registries | string }} 11 | -------------------------------------------------------------------------------- /roles/network_plugin/macvlan/templates/10-macvlan.conf.j2: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.4.0", 3 | "name": "mynet", 4 | "type": "macvlan", 5 | "master": "{{ macvlan_interface }}", 6 | "hairpinMode": true, 7 | "ipam": { 8 | "type": "host-local", 9 | "subnet": "{{ node_pod_cidr }}", 10 | "routes": [ 11 | { "dst": "0.0.0.0/0" } 12 | ], 13 | "gateway": "{{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/clusterrolebinding-rbd-provisioner.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: rbd-provisioner 6 | subjects: 7 | - kind: ServiceAccount 8 | name: rbd-provisioner 9 | namespace: {{ rbd_provisioner_namespace }} 10 | roleRef: 11 | kind: ClusterRole 12 | name: rbd-provisioner 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/policy_controller/calico/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Limits for calico apps 3 | calico_policy_controller_cpu_limit: 1000m 4 | calico_policy_controller_memory_limit: 256M 5 | calico_policy_controller_cpu_requests: 30m 6 | calico_policy_controller_memory_requests: 64M 7 | calico_policy_controller_deployment_nodeselector: "kubernetes.io/os: linux" 8 | 9 | #
SSL 10 | calico_cert_dir: "/etc/calico/certs" 11 | canal_cert_dir: "/etc/canal/certs" 12 | -------------------------------------------------------------------------------- /tests/scripts/vagrant_clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | # Cleanup vagrant VMs to avoid name conflicts 5 | 6 | apt-get install -y libvirt-clients 7 | 8 | for i in $(virsh list --name) 9 | do 10 | virsh destroy "$i" 11 | virsh undefine "$i" 12 | done 13 | 14 | 15 | # Cleanup domain volumes 16 | for i in $(virsh vol-list default|grep \.img |grep -v VAGRANTSLASH | cut -f 2 -d ' ') 17 | do 18 | virsh vol-delete "$i" --pool default 19 | done -------------------------------------------------------------------------------- /docs/cloud.md: -------------------------------------------------------------------------------- 1 | # Cloud providers 2 | 3 | ## Provisioning 4 | 5 | You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation. 6 | 7 | ## Deploy kubernetes 8 | 9 | With ansible-playbook command 10 | 11 | ```ShellSession 12 | ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml 13 | ``` 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ansible/templates/netchecker-server-clusterrolebinding.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: netchecker-server 5 | namespace: {{ netcheck_namespace }} 6 | subjects: 7 | - kind: ServiceAccount 8 | name: netchecker-server 9 | namespace: {{ netcheck_namespace }} 10 | roleRef: 11 | kind: ClusterRole 12 | name: netchecker-server 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/upcloud/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | upcloud_csi_controller_replicas: 1 3 | upcloud_csi_provisioner_image_tag: "v3.1.0" 4 | upcloud_csi_attacher_image_tag: "v3.4.0" 5 | upcloud_csi_resizer_image_tag: "v1.4.0" 6 | upcloud_csi_plugin_image_tag: "v0.2.1" 7 | upcloud_csi_node_image_tag: "v2.5.0" 8 | upcloud_username: "{{ lookup('env','UPCLOUD_USERNAME') }}" 9 | upcloud_password: "{{ lookup('env','UPCLOUD_PASSWORD') }}" 10 | upcloud_tolerations: [] -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cephfs_provisioner_namespace: "cephfs-provisioner" 3 | cephfs_provisioner_cluster: ceph 4 | cephfs_provisioner_monitors: ~ 5 | cephfs_provisioner_admin_id: admin 6 | cephfs_provisioner_secret: secret 7 | cephfs_provisioner_storage_class: cephfs 8 | cephfs_provisioner_reclaim_policy: Delete 9 | cephfs_provisioner_claim_root: /volumes 10 | cephfs_provisioner_deterministic_names: true 11 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/role-rbd-provisioner.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: rbd-provisioner 6 | namespace: {{ rbd_provisioner_namespace }} 7 | rules: 8 | - apiGroups: [""] 9 | resources: ["secrets"] 10 | verbs: ["get"] 11 | - apiGroups: [""] 12 | resources: ["endpoints"] 13 | verbs: ["get", "list", "watch", "create", "update", "patch"] 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: registry-pvc 6 | namespace: {{ registry_namespace }} 7 | labels: 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | spec: 10 | accessModes: 11 | - {{ registry_storage_access_mode }} 12 | storageClassName: {{ registry_storage_class }} 13 | resources: 14 | requests: 15 | storage: {{ registry_disk_size }} 16 | -------------------------------------------------------------------------------- /tests/files/packet_centos7-docker-weave-upgrade-ha.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: centos-7 4 | mode: ha 5 | 6 | # Kubespray settings 7 | kube_network_plugin: weave 8 | kubernetes_audit: true 9 | 10 | # Docker specific settings: 11 | container_manager: docker 12 | etcd_deployment_type: docker 13 | resolvconf_mode: docker_dns 14 | 15 | # Needed to upgrade from 1.16 to 1.17, otherwise upgrade is partial and bug followed 16 | upgrade_cluster_setup: true 17 | -------------------------------------------------------------------------------- /tests/files/packet_ubuntu16-docker-weave-sep.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: ubuntu-1604 4 | mode: separate 5 | 6 | # Kubespray settings 7 | kube_network_plugin: weave 8 | auto_renew_certificates: true 9 | 10 | # Docker specific settings: 11 | container_manager: docker 12 | etcd_deployment_type: docker 13 | resolvconf_mode: docker_dns 14 | 15 | # Ubuntu 16 - docker containerd package available stopped at 1.4.6 16 | docker_containerd_version: latest 17 | -------------------------------------------------------------------------------- /OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | aliases: 2 | kubespray-approvers: 3 | - mattymo 4 | - chadswen 5 | - mirwan 6 | - miouge1 7 | - luckysb 8 | - floryut 9 | - oomichi 10 | - cristicalin 11 | kubespray-reviewers: 12 | - holmsten 13 | - bozzo 14 | - eppo 15 | - oomichi 16 | - jayonlau 17 | - cristicalin 18 | - liupeng0518 19 | kubespray-emeritus_approvers: 20 | - riverzhang 21 | - atoms 22 | - ant31 23 | - woopstar 24 | -------------------------------------------------------------------------------- /contrib/kvm-setup/README.md: -------------------------------------------------------------------------------- 1 | # Kubespray on KVM Virtual Machines hypervisor preparation 2 | 3 | A simple playbook to ensure your system has the right settings to enable Kubespray 4 | deployment on VMs. 5 | 6 | This playbook does not create Virtual Machines, nor does it run Kubespray itself. 7 | 8 | ## User creation 9 | 10 | If you want to create a user for running Kubespray deployment, you should specify 11 | both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`. 
12 | -------------------------------------------------------------------------------- /contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/templates/glusterfs-kubernetes-pv.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: glusterfs 5 | spec: 6 | capacity: 7 | storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi" 8 | accessModes: 9 | - ReadWriteMany 10 | glusterfs: 11 | endpoints: glusterfs 12 | path: gluster 13 | readOnly: false 14 | persistentVolumeReclaimPolicy: Retain 15 | -------------------------------------------------------------------------------- /contrib/terraform/hetzner/modules/kubernetes-cluster/templates/cloud-init.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | users: 4 | - default 5 | - name: ubuntu 6 | shell: /bin/bash 7 | sudo: "ALL=(ALL) NOPASSWD:ALL" 8 | ssh_authorized_keys: 9 | %{ for ssh_public_key in ssh_public_keys ~} 10 | - ${ssh_public_key} 11 | %{ endfor ~} 12 | 13 | ssh_authorized_keys: 14 | %{ for ssh_public_key in ssh_public_keys ~} 15 | - ${ssh_public_key} 16 | %{ endfor ~} 17 | 18 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-poddisruptionbudget.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: cinder-csi-pdb 5 | namespace: kube-system 6 | spec: 7 | {% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} 8 | minAvailable: 1 9 | {% else %} 10 | minAvailable: 0 11 | {% endif %} 12 | selector: 13 | matchLabels: 14 | app: csi-cinder-controllerplugin 15 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-psp-cr.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: psp:local-path-provisioner 6 | namespace: {{ local_path_provisioner_namespace }} 7 | rules: 8 | - apiGroups: 9 | - policy 10 | resourceNames: 11 | - local-path-provisioner 12 | resources: 13 | - podsecuritypolicies 14 | verbs: 15 | - use 16 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/persistent_volumes/azuredisk-csi/templates/azure-csi-storage-class.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: disk.csi.azure.com 6 | provisioner: disk.csi.azure.com 7 | parameters: 8 | skuname: {{ storage_account_type }} 9 | {% if azure_csi_tags is defined %} 10 | tags: {{ azure_csi_tags }} 11 | {% endif %} 12 | reclaimPolicy: Delete 13 | volumeBindingMode: Immediate 14 | allowVolumeExpansion: true 15 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/policy_controller/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: policy_controller/calico 4 | when: 5 | - kube_network_plugin == 'calico' 6 | - enable_network_policy 7 | - calico_datastore != "kdd" 8 | tags: 9 | - policy-controller 10 
| 11 | - role: policy_controller/calico 12 | when: 13 | - kube_network_plugin == 'canal' 14 | - calico_datastore != "kdd" 15 | tags: 16 | - policy-controller 17 | -------------------------------------------------------------------------------- /roles/network_plugin/weave/templates/10-weave.conflist.j2: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.3.0", 3 | "name": "weave", 4 | "plugins": [ 5 | { 6 | "name": "weave", 7 | "type": "weave-net", 8 | "hairpinMode": {{ weave_hairpin_mode | bool | lower }} 9 | }, 10 | { 11 | "type": "portmap", 12 | "capabilities": {"portMappings": true}, 13 | "snat": true 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /tests/files/packet_centos7-calico-ha-once-localhost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Instance settings 3 | cloud_image: centos-7 4 | mode: ha 5 | 6 | # Kubespray settings 7 | download_localhost: true 8 | download_run_once: true 9 | typha_enabled: true 10 | calico_apiserver_enabled: true 11 | calico_backend: kdd 12 | typha_secure: true 13 | disable_ipv6_dns: true 14 | auto_renew_certificates: true 15 | 16 | # Docker settings 17 | container_manager: docker 18 | etcd_deployment_type: docker 19 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/csi_driver/aws_ebs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | aws_ebs_csi_enable_volume_scheduling: true 3 | aws_ebs_csi_enable_volume_snapshot: false 4 | aws_ebs_csi_enable_volume_resizing: false 5 | aws_ebs_csi_controller_replicas: 1 6 | aws_ebs_csi_plugin_image_tag: latest 7 | 8 | # Add annotations to ebs_csi_controller.
Useful if using kube2iam for role assumption 9 | # aws_ebs_csi_annotations: 10 | # - key: iam.amazonaws.com/role 11 | # value: your-ebs-role-arn 12 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/clusterrolebinding-cephfs-provisioner.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: cephfs-provisioner 6 | subjects: 7 | - kind: ServiceAccount 8 | name: cephfs-provisioner 9 | namespace: {{ cephfs_provisioner_namespace }} 10 | roleRef: 11 | kind: ClusterRole 12 | name: cephfs-provisioner 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: psp:local-volume-provisioner 5 | namespace: {{ local_volume_provisioner_namespace }} 6 | rules: 7 | - apiGroups: 8 | - policy 9 | resourceNames: 10 | - local-volume-provisioner 11 | resources: 12 | - podsecuritypolicies 13 | verbs: 14 | - use 15 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-tcp-services.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: tcp-services 6 | namespace: {{ ingress_nginx_namespace }} 7 | labels: 8 | app.kubernetes.io/name: ingress-nginx 9 | app.kubernetes.io/part-of: ingress-nginx 10 | {% if ingress_nginx_configmap_tcp_services %} 11 | data: 12 | {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }} 13 | {%- endif %} 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/cm-udp-services.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: udp-services 6 | namespace: {{ ingress_nginx_namespace }} 7 | labels: 8 | app.kubernetes.io/name: ingress-nginx 9 | app.kubernetes.io/part-of: ingress-nginx 10 | {% if ingress_nginx_configmap_udp_services %} 11 | data: 12 | {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }} 13 | {%- endif %} 14 | -------------------------------------------------------------------------------- /roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: metrics-server:system:auth-delegator 5 | labels: 6 | addonmanager.kubernetes.io/mode: Reconcile 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: system:auth-delegator 11 | subjects: 12 | - kind: ServiceAccount 13 | name: metrics-server 14 | namespace: kube-system 15 | -------------------------------------------------------------------------------- /roles/network_plugin/flannel/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 
--------------------------------------------------------------------------------
/roles/kubernetes-apps/metrics_server/templates/auth-delegator.yaml.j2:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system

--------------------------------------------------------------------------------
/roles/network_plugin/flannel/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Flannel | Create Flannel manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
    mode: 0644
  with_items:
    - {name: flannel, file: cni-flannel-rbac.yml, type: sa}
    - {name: kube-flannel, file: cni-flannel.yml, type: ds}
  register: flannel_node_manifests
  when:
    - inventory_hostname == groups['kube_control_plane'][0]

--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_cloud_controller/hcloud/templates/external-hcloud-cloud-role-bindings.yml.j2:
--------------------------------------------------------------------------------
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: {{ external_hcloud_cloud.service_account_name }}
    namespace: kube-system

--------------------------------------------------------------------------------
/roles/remove-node/post-remove/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Delete node
  command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
  delegate_to: "{{ groups['kube_control_plane']|first }}"
  when: inventory_hostname in groups['k8s_cluster']
  retries: 10
  # The api-server can have a short window of unavailability while a control plane node is being deleted
  delay: 3
  register: result
  until: result is not failed

--------------------------------------------------------------------------------
/tests/files/vagrant_centos7-kube-router.rb:
--------------------------------------------------------------------------------
$num_instances = 2
$vm_memory ||= 2048
$os = "centos"

$kube_master_instances = 1
$etcd_instances = 1

# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"

# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false

$network_plugin = "kube-router"

--------------------------------------------------------------------------------
/inventory/prod/group_vars/all/cri-o.yml:
--------------------------------------------------------------------------------
# crio_insecure_registries:
#   - 10.0.0.2:5000
# crio_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass
crio_registries:
  - prefix: docker.io
    insecure: false
    blocked: false
    location: registry-1.docker.io
    mirrors:
      - location: 172.20.100.52:5000
        insecure: true
      - location: mirror.gcr.io
        insecure: false

--------------------------------------------------------------------------------
/inventory/s000/group_vars/all/cri-o.yml:
--------------------------------------------------------------------------------
# crio_insecure_registries:
#   - 10.0.0.2:5000
# crio_registry_auth:
#   - registry: 10.0.0.2:5000
#     username: user
#     password: pass
crio_registries:
  - prefix: docker.io
    insecure: false
    blocked: false
    location: registry-1.docker.io
    mirrors:
      - location: 172.20.100.52:5000
        insecure: true
      - location: mirror.gcr.io
        insecure: false
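
Note: both inventories above carry identical CRI-O registry settings: pulls for the docker.io prefix are tried against the listed mirrors in order before falling back to registry-1.docker.io. A sketch of adding a second mirrored prefix (the quay.io entry is illustrative; the mirror address reuses the one already configured above):

crio_registries:
  - prefix: quay.io
    insecure: false
    blocked: false
    location: quay.io
    mirrors:
      - location: 172.20.100.52:5000
        insecure: true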
--------------------------------------------------------------------------------
/roles/bastion-ssh-config/molecule/default/converge.yml:
--------------------------------------------------------------------------------
---
- name: Converge
  hosts: all
  become: true
  gather_facts: false
  roles:
    - role: bastion-ssh-config
  tasks:
    - name: Copy config to remote host
      copy:
        src: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}"
        dest: "{{ ssh_bastion_confing__name }}"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: 0644

--------------------------------------------------------------------------------
/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-clusterrolebinding.yml.j2:
--------------------------------------------------------------------------------
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp:netchecker-agent-hostnet
  namespace: {{ netcheck_namespace }}
subjects:
  - kind: ServiceAccount
    name: netchecker-agent
    namespace: {{ netcheck_namespace }}
roleRef:
  kind: ClusterRole
  name: psp:netchecker-agent-hostnet
  apiGroup: rbac.authorization.k8s.io

--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2:
--------------------------------------------------------------------------------
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp:local-volume-provisioner
  namespace: {{ local_volume_provisioner_namespace }}
rules:
  - apiGroups:
      - policy
    resourceNames:
      - local-volume-provisioner
    resources:
      - podsecuritypolicies
    verbs:
      - use

--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_provisioner/rbd_provisioner/templates/rolebinding-rbd-provisioner.yml.j2:
--------------------------------------------------------------------------------
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: {{ rbd_provisioner_namespace }}
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: {{ rbd_provisioner_namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner

--------------------------------------------------------------------------------
/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml:
--------------------------------------------------------------------------------
---
cert_manager_namespace: "cert-manager"
cert_manager_user: 1001
cert_manager_tolerations: []
cert_manager_affinity: {}
cert_manager_nodeselector: {}

## Change the leader election namespace when deploying on GKE Autopilot, which forbids changes to the kube-system namespace.
## See https://github.com/jetstack/cert-manager/issues/3717
cert_manager_leader_election_namespace: kube-system
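
Note: on GKE Autopilot the kube-system namespace is locked down for workloads, so cert-manager's leader election lease cannot live there. A group_vars sketch redirecting it (cert-manager's own namespace is one reasonable choice; any namespace the controller can write to works):

cert_manager_leader_election_namespace: cert-manager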
--------------------------------------------------------------------------------
/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Canal | Start Resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ canal_manifests.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

--------------------------------------------------------------------------------
/roles/kubernetes/kubeadm/defaults/main.yml:
--------------------------------------------------------------------------------
---
# discovery_timeout modifies the discovery timeout
# This value must be smaller than kubeadm_join_timeout
discovery_timeout: 60s
kubeadm_join_timeout: 120s

# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
  {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
  {%- else -%}
  {{ inventory_hostname }}
  {%- endif -%}
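
Note: the default above deliberately renders an empty override on AWS, where the node must register under the cloud provider's own hostname; everywhere else it falls back to inventory_hostname. A host_vars sketch pinning an explicit node name instead (the FQDN is illustrative):

kube_override_hostname: node1.internal.example.com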
--------------------------------------------------------------------------------
/tests/files/vagrant_fedora35-kube-router.rb:
--------------------------------------------------------------------------------
$num_instances = 2
$vm_memory ||= 2048
$os = "fedora35"

$kube_master_instances = 1
$etcd_instances = 1

# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"

# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false

$network_plugin = "kube-router"

--------------------------------------------------------------------------------
/contrib/terraform/aws/modules/vpc/outputs.tf:
--------------------------------------------------------------------------------
output "aws_vpc_id" {
  value = aws_vpc.cluster-vpc.id
}

output "aws_subnet_ids_private" {
  value = aws_subnet.cluster-vpc-subnets-private.*.id
}

output "aws_subnet_ids_public" {
  value = aws_subnet.cluster-vpc-subnets-public.*.id
}

output "aws_security_group" {
  value = aws_security_group.kubernetes.*.id
}

output "default_tags" {
  value = var.default_tags
}

--------------------------------------------------------------------------------
/roles/container-engine/docker/vars/amazon.yml:
--------------------------------------------------------------------------------
---
# https://docs.aws.amazon.com/en_us/AmazonECS/latest/developerguide/docker-basics.html

docker_versioned_pkg:
  'latest': docker
  '18.09': docker-18.09.9ce-2.amzn2
  '19.03': docker-19.03.13ce-1.amzn2
  '20.10': docker-20.10.7-5.amzn2

docker_version: "latest"

docker_package_info:
  pkgs:
    - "{{ docker_versioned_pkg[docker_version | string] }}"
  enablerepo: amzn2extra-docker

--------------------------------------------------------------------------------
/roles/kubernetes-apps/metrics_server/templates/metrics-server-service.yaml.j2:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    app.kubernetes.io/name: "metrics-server"
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: metrics-server
  ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https

--------------------------------------------------------------------------------
/roles/kubernetes-apps/metrics_server/templates/resource-reader-clusterrolebinding.yaml.j2:
--------------------------------------------------------------------------------
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system

--------------------------------------------------------------------------------
/tests/files/vagrant_ubuntu16-kube-router-sep.rb:
--------------------------------------------------------------------------------
$num_instances = 2
$vm_memory ||= 2048
$os = "ubuntu1604"

$kube_master_instances = 1
$etcd_instances = 1

# For CI we are not worried about data persistence across reboot
$libvirt_volume_cache = "unsafe"

# Checking for box update can trigger API rate limiting
# https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
$box_check_update = false

$network_plugin = "kube-router"

--------------------------------------------------------------------------------
/inventory/prod/group_vars/all/aws.yml:
--------------------------------------------------------------------------------
## To use the AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"

--------------------------------------------------------------------------------
/inventory/s000/group_vars/all/aws.yml:
--------------------------------------------------------------------------------
## To use the AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"

--------------------------------------------------------------------------------
/inventory/sample/group_vars/all/aws.yml:
--------------------------------------------------------------------------------
## To use the AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
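
Note: the three aws.yml inventories above are identical. A sketch of the same block with the driver enabled (the owner/team/environment values are illustrative; the tags are a comma-separated key=value list applied to every provisioned volume):

aws_ebs_csi_enabled: true
aws_ebs_csi_enable_volume_scheduling: true
aws_ebs_csi_enable_volume_snapshot: false
aws_ebs_csi_enable_volume_resizing: false
aws_ebs_csi_controller_replicas: 2
aws_ebs_csi_plugin_image_tag: latest
aws_ebs_csi_extra_volume_tags: "Owner=alice,Team=platform,Environment=staging"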
--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/role-cephfs-provisioner.yml.j2:
--------------------------------------------------------------------------------
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: {{ cephfs_provisioner_namespace }}
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]

--------------------------------------------------------------------------------
/roles/bootstrap-os/tasks/bootstrap-amazon.yml:
--------------------------------------------------------------------------------
---
- name: Enable EPEL repo for Amazon Linux
  yum_repository:
    name: epel
    file: epel
    description: Extra Packages for Enterprise Linux 7 - $basearch
    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
    gpgcheck: yes
    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
    skip_if_unavailable: yes
    enabled: yes
    repo_gpgcheck: no
  when: epel_enabled

--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_provisioner/local_path_provisioner/templates/local-path-storage-sc.yml.j2:
--------------------------------------------------------------------------------
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ local_path_provisioner_storage_class }}
  annotations:
    storageclass.kubernetes.io/is-default-class: "{{ local_path_provisioner_is_default_storageclass }}"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: {{ local_path_provisioner_reclaim_policy }}
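
Note: because the StorageClass above sets volumeBindingMode: WaitForFirstConsumer, a claim stays Pending until a pod first mounts it; that is expected, not an error. A minimal claim against the default "local-path" class (the PVC name and size are illustrative):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-path-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 2Gi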
--------------------------------------------------------------------------------
/roles/kubernetes-apps/metrics_server/templates/auth-reader.yaml.j2:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system

--------------------------------------------------------------------------------
/roles/kubespray-defaults/tasks/fallback_ips_gather.yml:
--------------------------------------------------------------------------------
---
# Included as a workaround for a mitogen issue
# https://github.com/dw/mitogen/issues/663

- name: "Gather ansible_default_ipv4 from {{ delegate_host_to_gather_facts }}"
  setup:
    gather_subset: '!all,network'
    filter: "ansible_default_ipv4"
  delegate_to: "{{ delegate_host_to_gather_facts }}"
  connection: "{{ (delegate_host_to_gather_facts == 'localhost') | ternary('local', omit) }}"
  delegate_facts: yes

--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/failing-test.md:
--------------------------------------------------------------------------------
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test

---



**Which jobs are failing**:

**Which test(s) are failing**:

**Since when has it been failing**:

**Testgrid link**:

**Reason for failure**:

**Anything else we need to know**:

--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_provisioner/local_path_provisioner/defaults/main.yml:
--------------------------------------------------------------------------------
---
local_path_provisioner_enabled: false
local_path_provisioner_namespace: "local-path-storage"
local_path_provisioner_storage_class: "local-path"
local_path_provisioner_reclaim_policy: Delete
local_path_provisioner_claim_root: /opt/local-path-provisioner/
local_path_provisioner_is_default_storageclass: "true"
local_path_provisioner_debug: false
local_path_provisioner_helper_image_tag: "latest"

--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_cloud_controller/vsphere/templates/external-vsphere-cpi-cloud-config-secret.yml.j2:
--------------------------------------------------------------------------------
# This YAML file contains the secret objects
# that are necessary to run the external vSphere cloud controller.

apiVersion: v1
kind: Secret
metadata:
  name: cpi-global-secret
  namespace: kube-system
stringData:
  {{ external_vsphere_vcenter_ip }}.username: "{{ external_vsphere_user }}"
  {{ external_vsphere_vcenter_ip }}.password: "{{ external_vsphere_password }}"
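
Note: the secret template above keys each credential pair by the vCenter address, so all three variables must be defined in the inventory. A group_vars sketch with illustrative values (keep the password in ansible-vault rather than plain group_vars):

external_vsphere_vcenter_ip: vcenter.example.local
external_vsphere_user: administrator@vsphere.local
external_vsphere_password: "ChangeMe"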
--------------------------------------------------------------------------------
/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/rolebinding-cephfs-provisioner.yml.j2:
--------------------------------------------------------------------------------
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: {{ cephfs_provisioner_namespace }}
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: {{ cephfs_provisioner_namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner

--------------------------------------------------------------------------------
/roles/kubernetes-apps/ingress_controller/alb_ingress_controller/templates/alb-ingress-clusterrolebinding.yml.j2:
--------------------------------------------------------------------------------
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: alb-ingress
  namespace: {{ alb_ingress_controller_namespace }}
subjects:
  - kind: ServiceAccount
    name: alb-ingress
    namespace: {{ alb_ingress_controller_namespace }}
roleRef:
  kind: ClusterRole
  name: alb-ingress
  apiGroup: rbac.authorization.k8s.io

--------------------------------------------------------------------------------
/roles/kubernetes-apps/metrics_server/templates/metrics-apiservice.yaml.j2:
--------------------------------------------------------------------------------
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: {{ metrics_server_kubelet_insecure_tls }}
  groupPriorityMinimum: 100
  versionPriority: 100

--------------------------------------------------------------------------------
/contrib/terraform/aws/templates/inventory.tpl:
--------------------------------------------------------------------------------
[all]
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}
${public_ip_address_bastion}

[bastion]
${public_ip_address_bastion}

[kube_control_plane]
${list_master}

[kube_node]
${list_node}

[etcd]
${list_etcd}

[calico_rr]

[k8s_cluster:children]
kube_node
kube_control_plane
calico_rr

[k8s_cluster:vars]
${nlb_api_fqdn}

--------------------------------------------------------------------------------
/roles/etcd/tasks/refresh_config.yml:
--------------------------------------------------------------------------------
---
- name: Refresh config | Create etcd config file
  template:
    src: etcd.env.j2
    dest: /etc/etcd.env
    mode: 0640
  notify: restart etcd
  when: is_etcd_master and etcd_cluster_setup

- name: Refresh config | Create etcd-events config file
  template:
    src: etcd-events.env.j2
    dest: /etc/etcd-events.env
    mode: 0640
  notify: restart etcd-events
  when: is_etcd_master and etcd_events_cluster_setup

--------------------------------------------------------------------------------
/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-service.yml.j2:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: vsphere-csi-controller
  namespace: kube-system
  labels:
    app: vsphere-csi-controller
spec:
  ports:
    - name: ctlr
      port: 2112
      targetPort: 2112
      protocol: TCP
    - name: syncer
      port: 2113
      targetPort: 2113
      protocol: TCP
  selector:
    app: vsphere-csi-controller

--------------------------------------------------------------------------------
/roles/kubernetes/control-plane/templates/apiserver-audit-webhook-config.yaml.j2:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: {{ audit_webhook_server_url }}
{% for key in audit_webhook_server_extra_args %}
      {{ key }}: "{{ audit_webhook_server_extra_args[key] }}"
{% endfor %}
    name: auditsink
contexts:
  - context:
      cluster: auditsink
      user: ""
    name: default-context
current-context: default-context
preferences: {}
users: []
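
Note: the template above is an ordinary kubeconfig: the apiserver ships audit events to audit_webhook_server_url, and every key in audit_webhook_server_extra_args is injected as an additional cluster field. A group_vars sketch (the URL and CA path are illustrative; certificate-authority is a standard kubeconfig cluster field):

audit_webhook_server_url: https://audit.example.com/webhook
audit_webhook_server_extra_args:
  certificate-authority: /etc/kubernetes/audit-webhook-ca.crt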
--------------------------------------------------------------------------------
/contrib/dind/test-some_distros-kube_router_combo.env:
--------------------------------------------------------------------------------
DISTROS=(debian centos)
NETCHECKER_HOST=${NODES[0]}
EXTRAS=(
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
)
--------------------------------------------------------------------------------