├── files ├── localdns.conf ├── daemon.json └── terraform │ ├── cloud_init.cfg │ ├── main.tf │ └── variables.tf ├── tests ├── inventory └── k8s_cluster.yml ├── .vscode └── settings.json ├── libvirt-k8s.png ├── inventory ├── requirements.yml ├── templates ├── crio.conf.crun.j2 ├── libvirt_dnsmasq.j2 ├── systemd-resolved.j2 ├── metallb-l2.j2 ├── kubeadm-join-config.yaml.j2 ├── crio.conf.j2 ├── kubeadm-join-cp-config.yaml.j2 ├── cleanup-playbook.yml.j2 ├── kubeadm-config.yaml.j2 ├── inventory.j2 ├── haproxy.j2 └── rook-values.yml.j2 ├── .gitignore ├── execution-environment ├── requirements.yml └── execution-environment.yml ├── ansible.cfg ├── ansible-navigator.yaml ├── Makefile ├── 21_join_nodes.yml ├── 20_join_control_plane.yml ├── 29_save_inventory.yml ├── LICENSE ├── vars └── k8s_cluster.yml ├── 12_setup_kubeadm_config.yml ├── group_vars ├── k8s_nodes │ └── vars.yml └── vm_host │ └── vars.yml ├── 25_complete_setup.yml ├── 33_install_metalLB.yml ├── 99_cleanup.yml ├── 01_install_virtualization_tools.yml ├── main.yml ├── 32_install_rook.yml ├── 13_ignite_control_plane.yml ├── 03_provision_libvirt_resources.yml ├── 00_pre_flight_checklist.yml ├── 11_install_kube_packages.yml ├── 02_prepare_setup.yml ├── 22_apply_network_plugin.yml ├── 08_loadbalancer_services.yml ├── 30_install_ingress_controller.yml ├── README.md ├── 10_container_runtimes.yml └── 04_provisioning_vms.yml /files/localdns.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | dns=dnsmasq 3 | -------------------------------------------------------------------------------- /tests/inventory: -------------------------------------------------------------------------------- 1 | [vm_host] 2 | localhost ansible_connection=local 3 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "ansible.python.interpreterPath": "/bin/python" 3 | } -------------------------------------------------------------------------------- /libvirt-k8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubealex/libvirt-k8s-provisioner/HEAD/libvirt-k8s.png -------------------------------------------------------------------------------- /inventory: -------------------------------------------------------------------------------- 1 | [vm_host] 2 | localhost ansible_connection=local ansible_python_interpreter=/usr/bin/python3 3 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - ansible.posix 3 | - ansible.utils 4 | - community.crypto 5 | - community.general 6 | - kubernetes.core 7 | -------------------------------------------------------------------------------- /templates/crio.conf.crun.j2: -------------------------------------------------------------------------------- 1 | [crio.runtime.runtimes.crun] 2 | runtime_path = "/usr/bin/crun" 3 | runtime_type = "oci" 4 | runtime_root = "/run/crun" 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | clusters/ 2 | *-inventory 3 | *-cleanup-playbook.yml 4 | *.default 5 | *.good 6 | terraform.tfstate* 7 | .terraform* 8 | inventory-k8s 9 | context 10 | id_rsa* 11 | *.log 
-------------------------------------------------------------------------------- /templates/libvirt_dnsmasq.j2: -------------------------------------------------------------------------------- 1 | server=/{{ k8s.network.domain | default('k8s.test', true) }}/{{ k8s.network.network_cidr | default('192.168.200.0/24', true) | ansible.utils.next_nth_usable(1) }} 2 | -------------------------------------------------------------------------------- /execution-environment/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ansible.posix 3 | - name: ansible.utils 4 | - name: community.crypto 5 | - name: community.general 6 | - name: kubernetes.core 7 | -------------------------------------------------------------------------------- /templates/systemd-resolved.j2: -------------------------------------------------------------------------------- 1 | [Resolve] 2 | DNS={{ k8s.network.network_cidr | default('192.168.200.0/24', true) | ansible.utils.next_nth_usable(1) }} 3 | Domains=~{{ k8s.network.domain | default('k8s.test', true) }} 4 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory=./inventory 3 | forks=10 4 | #only gather if specified in playbooks 5 | #gathering = explicit 6 | [galaxy] 7 | disable_gpg_verify = true 8 | server_list = galaxy 9 | 10 | [galaxy_server.galaxy] 11 | url=https://galaxy.ansible.com/ 12 | 13 | -------------------------------------------------------------------------------- /files/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "exec-opts": ["native.cgroupdriver=systemd"], 3 | "log-driver": "json-file", 4 | "log-opts": { 5 | "max-size": "100m" 6 | }, 7 | "storage-driver": "overlay2", 8 | "storage-opts": [ 9 | "overlay2.override_kernel_check=true" 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /execution-environment/execution-environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 1 3 | 4 | # ansible_config: 'ansible.cfg' 5 | build_arg_defaults: 6 | EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:latest' 7 | EE_BUILDER_IMAGE: 'quay.io/ansible/ansible-builder:latest' 8 | dependencies: 9 | galaxy: requirements.yml 10 | -------------------------------------------------------------------------------- /templates/metallb-l2.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: IPAddressPool 4 | metadata: 5 | name: ip-pool 6 | namespace: metallb-system 7 | spec: 8 | addresses: 9 | - {{ metallb.l2.iprange | default('192.168.200.210-192.168.200.250', true) }} 10 | 11 | --- 12 | apiVersion: metallb.io/v1beta1 13 | kind: L2Advertisement 14 | metadata: 15 | name: l2-config 16 | namespace: metallb-system -------------------------------------------------------------------------------- /ansible-navigator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible-navigator: 3 | ansible: 4 | cmdline: "--forks 15" 5 | inventory: 6 | help: False 7 | entries: 8 | - ./inventory 9 | execution-environment: 10 | container-engine: podman 11 | enabled: true 12 | image: k8s-ee:latest 13 | pull: 14 | policy: never 15 | logging: 16 | level: debug 17 | # mode: stdout 18 | 19 | 
playbook-artifact: 20 | enable: false 21 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help 2 | help: 3 | @echo "Usage for libvirt-k8s-provisioner:" 4 | @echo " setup to install required collections" 5 | @echo " create to create the cluster" 6 | @echo " debug to create the cluster with debug options" 7 | @echo " destroy to destroy the cluster" 8 | .PHONY: setup 9 | setup: 10 | @ansible-galaxy collection install -r requirements.yml 11 | .PHONY: create 12 | create: 13 | @ansible-playbook main.yml 14 | .PHONY: debug 15 | debug: 16 | @ansible-playbook main.yml -vv 17 | .PHONY: destroy 18 | destroy: 19 | @ansible-playbook 99_cleanup.yml 20 | -------------------------------------------------------------------------------- /templates/kubeadm-join-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kubeadm.k8s.io/v1beta4 2 | caCertPath: /etc/kubernetes/pki/ca.crt 3 | discovery: 4 | bootstrapToken: 5 | apiServerEndpoint: "{{ hostvars[groups['loadbalancer'][0]].host_fqdn if ( k8s.control_plane.vms > 1 ) else hostvars[groups['masters'][0]].host_fqdn }}:6443" 6 | caCertHashes: 7 | - "{{ hostvars[groups['masters'][0]].kubehash }}" 8 | token: "{{ hostvars[groups['masters'][0]].kubetoken }}" 9 | timeout: 5m0s 10 | tlsBootstrapToken: "{{ hostvars[groups['masters'][0]].kubetoken }}" 11 | kind: JoinConfiguration 12 | nodeRegistration: 13 | imagePullPolicy: Always 14 | name: "{{ hostvars[inventory_hostname].host_fqdn }}" 15 | taints: null -------------------------------------------------------------------------------- /templates/crio.conf.j2: -------------------------------------------------------------------------------- 1 | [crio] 2 | [crio.runtime] 3 | selinux = false 4 | cgroup_manager="systemd" 5 | {% if k8s.cluster_os == "Ubuntu" %} 6 | default_runtime = "crun" 7 | {% endif %} 8 | default_capabilities = [ 9 | "CHOWN", 10 | "DAC_OVERRIDE", 11 | "FSETID", 12 | "FOWNER", 13 | "SETGID", 14 | "SETUID", 15 | "SETPCAP", 16 | "NET_BIND_SERVICE", 17 | "KILL", 18 | "MKNOD" 19 | ] 20 | 21 | [crio.network] 22 | plugin_dirs = [ 23 | "/opt/cni/bin", 24 | "/usr/libexec/cni", 25 | ] 26 | 27 | # A necessary configuration for Prometheus based metrics retrieval 28 | [crio.metrics] 29 | 30 | # Globally enable or disable metrics support. 31 | enable_metrics = true 32 | 33 | # The port on which the metrics server will listen. 
34 | metrics_port = 9537 35 | -------------------------------------------------------------------------------- /21_join_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Play to join nodes in the cluster 3 | hosts: workers 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: Joining worker nodes 8 | when: k8s_joined is not defined 9 | block: 10 | - name: Fire joinConfiguration template for worker nodes 11 | ansible.builtin.template: 12 | src: templates/kubeadm-join-config.yaml.j2 13 | dest: /tmp/kubeadm-join.yaml 14 | mode: "0755" 15 | 16 | - name: Join worker nodes in cluster # noqa no-changed-when 17 | ansible.builtin.command: kubeadm join --config /tmp/kubeadm-join.yaml 18 | become: true 19 | 20 | - name: Mark node as joined 21 | ansible.builtin.set_fact: 22 | k8s_joined: true 23 | -------------------------------------------------------------------------------- /files/terraform/cloud_init.cfg: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | hostname: ${instance_hostname} 3 | fqdn: ${instance_fqdn} 4 | manage_etc_hosts: true 5 | users: 6 | - name: ${cloud_user_username} 7 | sudo: ALL=(ALL) NOPASSWD:ALL 8 | groups: users, admin 9 | home: /home/${cloud_user_username} 10 | shell: /bin/bash 11 | passwd: ${cloud_user_password} 12 | lock_passwd: false 13 | %{ if cloud_user_sshkey != "" }ssh-authorized-keys: 14 | - ${cloud_user_sshkey} 15 | %{ endif } 16 | ssh_pwauth: true 17 | disable_root: false 18 | growpart: 19 | mode: auto 20 | devices: ['/'] 21 | packages: 22 | - qemu-guest-agent 23 | runcmd: 24 | - sed -i -e 's/^Defaults\s\+requiretty/# \0/' /etc/sudoers 25 | final_message: "The system is finally up, after $UPTIME seconds" 26 | # power_state: 27 | # mode: reboot -------------------------------------------------------------------------------- /20_join_control_plane.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Play to join control plane nodes in the cluster 3 | hosts: masters[1:] 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: Joining control plane nodes 8 | when: k8s_joined is not defined 9 | block: 10 | - name: Fire joinConfiguration template for control plane nodes 11 | ansible.builtin.template: 12 | src: templates/kubeadm-join-cp-config.yaml.j2 13 | dest: /tmp/kubeadm-join.yaml 14 | mode: "0755" 15 | 16 | - name: Join control-plane nodes in cluster # noqa no-changed-when 17 | ansible.builtin.command: kubeadm join --config /tmp/kubeadm-join.yaml 18 | become: true 19 | 20 | - name: Mark node as joined 21 | ansible.builtin.set_fact: 22 | k8s_joined: true 23 | -------------------------------------------------------------------------------- /29_save_inventory.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Save inventory to disk 3 | hosts: vm_host 4 | vars_files: vars/k8s_cluster.yml 5 | tasks: 6 | - name: Gather facts from all servers 7 | ansible.builtin.setup: 8 | delegate_to: "{{ item }}" 9 | delegate_facts: true 10 | loop: "{{ groups['all'] }}" 11 | 12 | - name: Fire up inventory template 13 | ansible.builtin.template: 14 | src: templates/inventory.j2 15 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/{{ k8s.cluster_name | default('k8s-test', true) }}-inventory-k8s" # noqa yaml[line-length] 16 | mode: "0755" 17 | 18 | - name: Print inventory location 19 |
ansible.builtin.debug: 20 | msg: Inventory is now saved as {{ k8s.cluster_name | default('k8s-test', true) }}-inventory-k8s, you can resume next steps by referencing it. 21 | -------------------------------------------------------------------------------- /templates/kubeadm-join-cp-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kubeadm.k8s.io/v1beta4 2 | caCertPath: /etc/kubernetes/pki/ca.crt 3 | controlPlane: 4 | certificateKey: "{{ hostvars[groups['masters'][0]].kubecertkey }}" 5 | localAPIEndpoint: 6 | advertiseAddress: "{{ hostvars[inventory_hostname].host_ip }}" 7 | bindPort: 6443 8 | discovery: 9 | bootstrapToken: 10 | apiServerEndpoint: "{{ hostvars[groups['loadbalancer'][0]].host_fqdn if ( k8s.control_plane.vms > 1 ) else hostvars[groups['masters'][0]].host_fqdn }}:6443" 11 | caCertHashes: 12 | - "{{ hostvars[groups['masters'][0]].kubehash }}" 13 | token: "{{ hostvars[groups['masters'][0]].kubetoken }}" 14 | timeout: 5m0s 15 | tlsBootstrapToken: "{{ hostvars[groups['masters'][0]].kubetoken }}" 16 | kind: JoinConfiguration 17 | nodeRegistration: 18 | imagePullPolicy: Always 19 | name: "{{ hostvars[inventory_hostname].host_fqdn }}" 20 | taints: 21 | - effect: NoSchedule 22 | key: node-role.kubernetes.io/control-plane -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Alessandro Rossi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /tests/k8s_cluster.yml: -------------------------------------------------------------------------------- 1 | # General configuration 2 | k8s: 3 | cluster_name: k8s-test 4 | cluster_os: Ubuntu 5 | cluster_version: "1.25" 6 | container_runtime: crio 7 | master_schedulable: false 8 | 9 | # Nodes configuration 10 | 11 | control_plane: 12 | vcpu: 4 13 | mem: 6 14 | vms: 1 15 | disk: 10 16 | 17 | worker_nodes: 18 | vcpu: 4 19 | mem: 6 20 | vms: 1 21 | disk: 10 22 | 23 | # Network configuration 24 | 25 | network: 26 | network_cidr: 192.168.200.0/24 27 | domain: k8s.test 28 | additional_san: "" 29 | pod_cidr: 10.20.0.0/16 30 | service_cidr: 10.110.0.0/16 31 | 32 | # Choose between [calico/flannel/cilium] 33 | cni_plugin: cilium 34 | 35 | # Rook configuration 36 | rook_ceph: 37 | install_rook: false 38 | volume_size: 50 39 | rook_cluster_size: 1 40 | 41 | # Ingress controller configuration [nginx/haproxy/contour] 42 | 43 | ingress_controller: 44 | install_ingress_controller: true 45 | type: haproxy 46 | node_port: 47 | http: 31080 48 | https: 31443 49 | 50 | # Section for metalLB setup 51 | 52 | metallb: 53 | install_metallb: false 54 | l2: 55 | iprange: 192.168.200.210-192.168.200.250 56 | -------------------------------------------------------------------------------- /vars/k8s_cluster.yml: -------------------------------------------------------------------------------- 1 | # General configuration 2 | k8s: 3 | cluster_name: k8s-test 4 | cluster_os: CentOS 5 | cluster_version: "1.35" 6 | container_runtime: crio 7 | master_schedulable: false 8 | 9 | # Nodes configuration 10 | 11 | control_plane: 12 | vcpu: 2 13 | mem: 2 14 | vms: 1 15 | disk: 40 16 | 17 | worker_nodes: 18 | vcpu: 2 19 | mem: 2 20 | vms: 1 21 | disk: 40 22 | 23 | # Network configuration 24 | 25 | network: 26 | network_cidr: 192.168.200.0/24 27 | domain: k8s.test 28 | additional_san: "" 29 | pod_cidr: 10.20.0.0/16 30 | service_cidr: 10.110.0.0/16 31 | 32 | # Choose between [calico/flannel/cilium] 33 | cni_plugin: calico 34 | 35 | # Rook configuration 36 | rook_ceph: 37 | install_rook: false 38 | volume_size: 50 39 | rook_cluster_size: 1 40 | 41 | # Ingress controller configuration [nginx/haproxy/contour] 42 | 43 | ingress_controller: 44 | install_ingress_controller: true 45 | type: haproxy 46 | node_port: 47 | http: 31080 48 | https: 31443 49 | 50 | # Section for metalLB setup 51 | 52 | metallb: 53 | install_metallb: false 54 | l2: 55 | iprange: 192.168.200.210-192.168.200.250 56 | -------------------------------------------------------------------------------- /templates/cleanup-playbook.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup playbook 3 | hosts: vm_host 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | become: true 7 | tasks: 8 | - name: Destroy cluster VM and libvirt resources 9 | community.general.terraform: 10 | force_init: true 11 | project_path: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}" 12 | state: absent 13 | 14 | - name: Delete all created paths and downloaded resources 15 | file: 16 | path: "{{ '{{' }} item {{ '}}' }}" 17 | state: absent 18 | loop: 19 | - /etc/NetworkManager/dnsmasq.d/{{ k8s.cluster_name | default('k8s-test', true) }}-libvirt_dnsmasq.conf 20 | - /etc/NetworkManager/conf.d/{{ k8s.cluster_name | default('k8s-test', true) }}-localdns.conf 21 | 22 | - name: Restart NetworkManager and libvirtd 23 | 
service: 24 | name: "{{ '{{' }} item {{ '}}' }}" 25 | state: restarted 26 | loop: 27 | - virtqemud 28 | - NetworkManager 29 | 30 | - name: Delete all created paths and downloaded resources 31 | file: 32 | path: "{{ '{{' }} item {{ '}}' }}" 33 | state: absent 34 | loop: 35 | - {{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }} -------------------------------------------------------------------------------- /12_setup_kubeadm_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare kubeadm-config for cluster setup 3 | hosts: masters[0] 4 | become: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Set fact for k8s version 9 | ansible.builtin.set_fact: 10 | kubernetes_version: v{{ k8s.cluster_version }}.{{ '11' if k8s.cluster_version is version('1.32', '==') else '7' if k8s.cluster_version is version('1.33', '==') else '3' if k8s.cluster_version is version('1.34', '==') else '0' if k8s.cluster_version is version('1.35', '==') }} 11 | 12 | - name: Process installation if cluster is not set 13 | when: k8s_installed is not defined 14 | block: 15 | - name: Generate cluster token # noqa no-changed-when 16 | ansible.builtin.command: kubeadm token generate 17 | register: kubetoken_generated 18 | 19 | - name: Set generated token as fact 20 | ansible.builtin.set_fact: 21 | kubetoken: "{{ kubetoken_generated.stdout }}" 22 | 23 | - name: Generate certificate key # noqa no-changed-when 24 | ansible.builtin.command: kubeadm certs certificate-key 25 | register: kubecert_generated 26 | 27 | - name: Set generated token as fact 28 | ansible.builtin.set_fact: 29 | kubecertkey: "{{ kubecert_generated.stdout }}" 30 | 31 | - name: Add kubelet config for node 32 | ansible.builtin.template: 33 | src: templates/kubeadm-config.yaml.j2 34 | dest: /tmp/kubeadm-config.yaml 35 | mode: "0755" 36 | -------------------------------------------------------------------------------- /templates/kubeadm-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kubeadm.k8s.io/v1beta4 2 | bootstrapTokens: 3 | - groups: 4 | - system:bootstrappers:kubeadm:default-node-token 5 | token: "{{ hostvars[groups['masters'][0]].kubetoken }}" 6 | ttl: 24h0m0s 7 | usages: 8 | - signing 9 | - authentication 10 | certificateKey: "{{ hostvars[groups['masters'][0]].kubecertkey }}" 11 | kind: InitConfiguration 12 | localAPIEndpoint: 13 | advertiseAddress: "{{ hostvars[inventory_hostname].host_ip }}" 14 | bindPort: 6443 15 | nodeRegistration: 16 | imagePullPolicy: Always 17 | name: "{{ hostvars[inventory_hostname].host_fqdn }}" 18 | taints: 19 | - effect: NoSchedule 20 | key: node-role.kubernetes.io/control-plane 21 | --- 22 | apiServer: 23 | {% if k8s.network.additional_san is defined and k8s.network.additional_san != '' %} 24 | certSANs: 25 | - "{{ k8s.network.additional_san | default('localhost') }}" 26 | {% endif %} 27 | timeoutForControlPlane: 4m0s 28 | apiVersion: kubeadm.k8s.io/v1beta4 29 | certificatesDir: /etc/kubernetes/pki 30 | clusterName: kubernetes 31 | controllerManager: {} 32 | {% if k8s.control_plane.vms > 1 %} 33 | controlPlaneEndpoint: "{{ hostvars[groups['loadbalancer'][0]].host_fqdn }}" 34 | {% endif %} 35 | dns: {} 36 | etcd: 37 | local: 38 | dataDir: /var/lib/etcd 39 | imageRepository: registry.k8s.io 40 | kind: ClusterConfiguration 41 | kubernetesVersion: "{{ kubernetes_version }}" 42 | networking: 43 | dnsDomain: cluster.local 44 | podSubnet: "{{ 
('10.244.0.0/16' if (k8s.network.cni_plugin == 'flannel') else k8s.network.pod_cidr) }}" 45 | serviceSubnet: "{{ k8s.network.service_cidr }}" 46 | scheduler: {} -------------------------------------------------------------------------------- /group_vars/k8s_nodes/vars.yml: -------------------------------------------------------------------------------- 1 | kubernetes: 2 | packages: 3 | k8s_packages: 4 | - kubelet 5 | - kubeadm 6 | - kubectl 7 | centos: 8 | k8s_repo: https://pkgs.k8s.io/core:/stable:/v{{ vars["k8s"]["cluster_version"] }}/rpm/ 9 | k8s_repo_key: https://pkgs.k8s.io/core:/stable:/v{{ vars["k8s"]["cluster_version"] }}/rpm/repodata/repomd.xml.key 10 | 11 | ubuntu: 12 | k8s_repo: deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v{{ vars["k8s"]["cluster_version"] }}/deb/ / 13 | k8s_repo_file: /etc/apt/sources.list.d/kubernetes.list 14 | k8s_repo_key: https://pkgs.k8s.io/core:/stable:/v{{ vars["k8s"]["cluster_version"] }}/deb/Release.key 15 | k8s_repo_keyring: /etc/apt/keyrings/kubernetes-apt-keyring.gpg 16 | 17 | crio: 18 | ubuntu: 19 | crio_repo: deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/ / 20 | crio_key: https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/Release.key 21 | crio_keyring: /etc/apt/keyrings/cri-o-apt-keyring.gpg 22 | crio_repofile: /etc/apt/sources.list.d/cri-o.list 23 | centos: 24 | crio_repo: https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/rpm/ 25 | crio_key: https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/rpm/repodata/repomd.xml.key 26 | containerd: 27 | centos: 28 | containerd_repo: https://download.docker.com/linux/centos/docker-ce.repo 29 | ubuntu: 30 | containerd_repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu jammy stable" 31 | containerd_repo_key: https://download.docker.com/linux/ubuntu/gpg 32 | containerd_repo_key_file: /etc/apt/trusted.gpg.d/docker.gpg 33 | -------------------------------------------------------------------------------- /25_complete_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Complete cluster setup 3 | hosts: vm_host 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: Prepare playbook for cluster deletion 8 | ansible.builtin.template: 9 | src: templates/cleanup-playbook.yml.j2 10 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/{{ k8s.cluster_name | default('k8s-test', true) }}-cleanup-playbook.yml" # noqa yaml[line-length] 11 | mode: "0755" 12 | 13 | - name: Delete image file 14 | ansible.builtin.file: 15 | path: "{{ item }}" 16 | state: absent 17 | mode: "0755" 18 | loop: 19 | - /tmp/{{ image_name }}.qcow2 20 | 21 | - name: Label worker nodes 22 | kubernetes.core.k8s_json_patch: 23 | kind: Node 24 | name: "{{ hostvars[item].host_fqdn }}" 25 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 26 | patch: 27 | - op: add 28 | path: /metadata/labels/node-role.kubernetes.io~1worker 29 | value: "" 30 | loop: "{{ groups['workers'] }}" 31 | 32 | - name: Remove taint from master nodes 33 | kubernetes.core.k8s_json_patch: 34 | kind: Node 35 | name: "{{ hostvars[item].host_fqdn }}" 36 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 37 | patch: 38 | - op: remove 39 | path: /spec/taints/0 40 | loop: "{{ 
groups['masters'] }}" 41 | when: k8s.master_schedulable 42 | register: result 43 | failed_when: 44 | - result.status is defined 45 | - result.status != 422 46 | -------------------------------------------------------------------------------- /33_install_metalLB.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare cluster to install metalLB 3 | hosts: vm_host 4 | run_once: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Configure MetalLB 9 | when: metallb.install_metallb 10 | block: 11 | - name: Render template for L2 configuration 12 | ansible.builtin.template: 13 | src: templates/metallb-l2.j2 14 | dest: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/metallb-l2.yml 15 | mode: "0755" 16 | 17 | - name: Download MetalLB CR for later apply 18 | ansible.builtin.get_url: 19 | url: "{{ item.url }}" 20 | dest: "{{ item.name }}" 21 | mode: "0664" 22 | loop: 23 | - name: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/metallb.yml 24 | url: "{{ metallb_setup.manifest_url }}/metallb-native.yaml" 25 | 26 | - name: Ensure MetalLB is installed in your cluster 27 | kubernetes.core.k8s: 28 | state: present 29 | src: "{{ item }}" 30 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 31 | apply: true 32 | wait: true 33 | loop: 34 | - /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/metallb.yml 35 | 36 | - name: Ensure MetalLB configuration CRD is added in your cluster 37 | kubernetes.core.k8s: 38 | state: present 39 | src: "/tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/metallb-l2.yml" 40 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 41 | wait: true 42 | apply: true 43 | -------------------------------------------------------------------------------- /99_cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup playbook 3 | hosts: vm_host 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: Set home directory 8 | ansible.builtin.set_fact: 9 | home_dir: "{{ ansible_env.HOME }}" 10 | 11 | - name: Destroy cluster VM 12 | community.general.terraform: 13 | force_init: true 14 | project_path: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}" 15 | state: absent 16 | variables: 17 | pool_name: "{{ k8s.cluster_name | default('k8s-test', true) }}" 18 | network_name: "{{ k8s.cluster_name | default('k8s-test', true) }}" 19 | become: true 20 | 21 | - name: Ensure images are deleted 22 | ansible.builtin.file: 23 | path: "{{ item }}" 24 | state: absent 25 | loop: 26 | - /tmp/OS-GenericCloud.qcow2 27 | become: true 28 | 29 | - name: Delete all created paths 30 | ansible.builtin.file: 31 | path: "{{ item }}" 32 | state: absent 33 | loop: 34 | - "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}" 35 | become: true 36 | 37 | - name: Cleanup local DNS config 38 | ansible.builtin.file: 39 | path: "{{ item }}" 40 | state: absent 41 | loop: 42 | - /etc/NetworkManager/dnsmasq.d/{{ k8s.cluster_name | default('k8s-test', true) }}-libvirt_dnsmasq.conf 43 | - /etc/NetworkManager/conf.d/{{ k8s.cluster_name | default('k8s-test', true) }}-localdns.conf 44 | become: true 45 | 46 | - name: Restart NetworkManager and libvirtd 47 | ansible.builtin.service: 48 | name: "{{ item }}" 49 | state: restarted 
50 | loop: 51 | - virtqemud 52 | - NetworkManager 53 | become: true 54 | -------------------------------------------------------------------------------- /templates/inventory.j2: -------------------------------------------------------------------------------- 1 | [vm_host] 2 | localhost ansible_connection=local 3 | [masters] 4 | {% for master in groups['masters'] %} 5 | {{ hostvars[master].host_fqdn }} ansible_host={{ hostvars[master].host_ip }} ansible_ssh_private_key_file={{ hostvars[master].ansible_ssh_private_key_file }} ansible_user={{ hostvars[master].ansible_user }} ansible_ssh_common_args="{{ hostvars[master].ansible_ssh_common_args }}" host_fqdn={{ hostvars[master].host_fqdn }} host_ip={{ hostvars[master].host_ip }} {% if loop.first %} kubehash={{ hostvars[master].kubehash }} kubetoken={{ hostvars[master].kubetoken }} kubecertkey={{ hostvars[master].kubecertkey }} k8s_installed={{ hostvars[master].k8s_installed }} {% else %} k8s_joined={{ hostvars[master].k8s_joined }} {% endif %} 6 | {% endfor %} 7 | {% if k8s.worker_nodes.vms > 0 %} 8 | [workers] 9 | {% endif %} 10 | {% for worker in groups['workers'] %} 11 | {{ hostvars[worker].host_fqdn }} ansible_host={{ hostvars[worker].host_ip }} ansible_ssh_private_key_file={{ hostvars[worker].ansible_ssh_private_key_file }} ansible_user={{ hostvars[worker].ansible_user }} ansible_ssh_common_args="{{ hostvars[worker].ansible_ssh_common_args }}" host_fqdn={{ hostvars[worker].host_fqdn }} host_ip={{ hostvars[worker].host_ip }} k8s_joined={{ hostvars[worker].k8s_joined }} 12 | {% endfor %} 13 | [k8s_nodes:children] 14 | {% if k8s.worker_nodes.vms > 0 %} 15 | workers 16 | {% endif %} 17 | masters 18 | {% if k8s.control_plane.vms > 1 or k8s.worker_nodes.vms > 1 %} 19 | [loadbalancer] 20 | {% for lb in groups['loadbalancer'] %} 21 | {{ hostvars[lb].host_fqdn }} ansible_host={{ hostvars[lb].host_ip }} ansible_ssh_private_key_file={{ hostvars[lb].ansible_ssh_private_key_file }} ansible_user={{ hostvars[lb].ansible_user }} ansible_ssh_common_args="{{ hostvars[lb].ansible_ssh_common_args }}" host_fqdn={{ hostvars[lb].host_fqdn }} host_ip={{ hostvars[lb].host_ip }} 22 | {% endfor %} 23 | {% endif %} 24 | -------------------------------------------------------------------------------- /01_install_virtualization_tools.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: This play installs needed tools to provision infrastructure VMs 3 | hosts: vm_host 4 | become: true 5 | tasks: 6 | - name: Manage packets when distro is 'CentOS' 7 | when: ansible_distribution == 'Fedora' or ansible_distribution == 'CentOS' 8 | block: 9 | - name: Upgrade all packages 10 | ansible.builtin.yum: 11 | name: "*" 12 | state: latest # noqa package-latest 13 | 14 | - name: Virtualization services are enabled 15 | ansible.builtin.service: 16 | name: virtqemud 17 | state: started 18 | enabled: true 19 | 20 | - name: Install required packages 21 | ansible.builtin.yum: 22 | name: "{{ os_packages.centos }}" 23 | state: latest # noqa package-latest 24 | when: 25 | - ansible_distribution == 'CentOS' 26 | 27 | - name: Install required packages 28 | ansible.builtin.yum: 29 | name: "{{ os_packages.fedora }}" 30 | state: present 31 | when: 32 | - ansible_distribution == 'Fedora' 33 | 34 | - name: Manage packets when distro is 'Ubuntu' 35 | when: ansible_distribution == 'Ubuntu' 36 | block: 37 | - name: Upgrade all packages 38 | ansible.builtin.apt: 39 | name: "*" 40 | state: latest # noqa package-latest 41 | update_cache: true 42 | 43 | 
- name: Install required packages 44 | ansible.builtin.apt: 45 | name: "{{ os_packages.ubuntu }}" 46 | state: present 47 | 48 | - name: Virtualization services are enabled 49 | ansible.builtin.service: 50 | name: libvirtd 51 | state: started 52 | enabled: true 53 | 54 | - name: Download and provision Terraform 55 | ansible.builtin.unarchive: 56 | src: "{{ terraform_url }}" 57 | dest: /usr/bin/ 58 | mode: "0755" 59 | remote_src: true 60 | -------------------------------------------------------------------------------- /templates/haproxy.j2: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local0 3 | log /dev/log local1 notice 4 | daemon 5 | 6 | defaults 7 | mode http 8 | log global 9 | option httplog 10 | option dontlognull 11 | option http-server-close 12 | option forwardfor except 127.0.0.0/8 13 | option redispatch 14 | retries 1 15 | timeout http-request 10s 16 | timeout queue 20s 17 | timeout connect 5s 18 | timeout client 20s 19 | timeout server 20s 20 | timeout http-keep-alive 10s 21 | timeout check 10s 22 | 23 | {% if k8s.control_plane.vms > 1 %} 24 | frontend kubernetes-control-plane 25 | bind *:6443 26 | default_backend kubernetes-control-plane 27 | mode tcp 28 | option tcplog 29 | 30 | backend kubernetes-control-plane 31 | option httpchk GET /healthz 32 | balance roundrobin 33 | mode tcp 34 | option ssl-hello-chk 35 | 36 | {% for node in groups['masters'] %} 37 | server {{ hostvars[node].host_fqdn }} {{ hostvars[node].host_ip }}:6443 check 38 | {% endfor %} 39 | {% endif %} 40 | 41 | {% if ingress_controller.install_ingress_controller %} 42 | frontend ingress_http_nodeport 43 | bind *:80 44 | default_backend ingress_http_nodeport 45 | mode tcp 46 | option tcplog 47 | 48 | backend ingress_http_nodeport 49 | balance source 50 | mode tcp 51 | 52 | {% for node in groups['workers'] %} 53 | server {{ hostvars[node].host_fqdn }} {{ hostvars[node].host_ip }}:{{ ingress_controller.node_port.http }} check 54 | {% endfor %} 55 | 56 | frontend ingress_https_nodeport 57 | bind *:443 58 | default_backend ingress_https_nodeport 59 | mode tcp 60 | option tcplog 61 | 62 | backend ingress_https_nodeport 63 | balance source 64 | mode tcp 65 | 66 | {% for node in groups['workers'] %} 67 | server {{ hostvars[node].host_fqdn }} {{ hostvars[node].host_ip }}:{{ ingress_controller.node_port.https }} check 68 | {% endfor %} 69 | 70 | {% endif %} 71 | -------------------------------------------------------------------------------- /main.yml: -------------------------------------------------------------------------------- 1 | - name: Setup | Playbook to run pre-flight test to verify vars 2 | import_playbook: 00_pre_flight_checklist.yml 3 | 4 | - name: Setup | Playbook to ensure virtualization tools are present 5 | import_playbook: 01_install_virtualization_tools.yml 6 | 7 | - name: Setup | Playbook to take care of necessary tools 8 | import_playbook: 02_prepare_setup.yml 9 | 10 | - name: Setup | Playbook to take care of libvirt resources 11 | import_playbook: 03_provision_libvirt_resources.yml 12 | 13 | - name: Setup | Playbook to ensure VMs are in place 14 | import_playbook: 04_provisioning_vms.yml 15 | 16 | - name: Setup | Playbook to configure a loadbalancer 17 | when: groups['masters'] | length > 1 or 18 | groups['workers'] | length > 1 19 | import_playbook: 08_loadbalancer_services.yml 20 | 21 | - name: Setup | Playbook to ensure a container runtime is configured 22 | import_playbook: 10_container_runtimes.yml 23 | 24 | - name: Setup | Playbook 
to setup needed k8s packages 25 | import_playbook: 11_install_kube_packages.yml 26 | 27 | - name: Setup | Playbook to configure kubeadm 28 | import_playbook: 12_setup_kubeadm_config.yml 29 | 30 | - name: Setup | Playbook to setup the control plane 31 | import_playbook: 13_ignite_control_plane.yml 32 | 33 | - name: Setup | Playbook to setup network plugin 34 | import_playbook: 22_apply_network_plugin.yml 35 | 36 | - name: Setup | Playbook to join additional control plane nodes 37 | import_playbook: 20_join_control_plane.yml 38 | when: groups['masters'] | length > 1 39 | 40 | - name: Setup | Playbook to join worker nodes 41 | import_playbook: 21_join_nodes.yml 42 | when: groups['workers'] is defined 43 | 44 | - name: Setup | Playbook to complete the setup 45 | import_playbook: 25_complete_setup.yml 46 | 47 | - name: Setup | Playbook to persist inventory 48 | import_playbook: 29_save_inventory.yml 49 | 50 | - name: Setup | Playbook to configure an ingress controller 51 | import_playbook: 30_install_ingress_controller.yml 52 | 53 | - name: Setup | Playbook to setup Rook 54 | import_playbook: 32_install_rook.yml 55 | 56 | - name: Setup | Playbook to setup MetalLB 57 | import_playbook: 33_install_metalLB.yml 58 | -------------------------------------------------------------------------------- /32_install_rook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare cluster to install rook 3 | hosts: vm_host 4 | run_once: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Configure Rook 9 | when: rook_ceph.install_rook 10 | block: 11 | - name: Ensure the needed Namespaces exist. 12 | kubernetes.core.k8s: 13 | definition: 14 | api_version: v1 15 | kind: Namespace 16 | metadata: 17 | name: rook-ceph 18 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 19 | state: present 20 | 21 | - name: Add helm chart repository for Rook 22 | kubernetes.core.helm_repository: 23 | name: "{{ item.name }}" 24 | repo_url: "{{ item.repo_url }}" 25 | loop: 26 | - name: "{{ rook.operator.chart.name }}" 27 | repo_url: "{{ rook.operator.chart.url }}" 28 | - name: "{{ rook.cluster.chart.name }}" 29 | repo_url: "{{ rook.cluster.chart.url }}" 30 | 31 | - name: Ensure rook-operator helm chart is installed 32 | kubernetes.core.helm: 33 | name: rook-operator 34 | chart_ref: "{{ rook.operator.chart.ref }}" 35 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 36 | release_namespace: rook-ceph 37 | update_repo_cache: true 38 | values: 39 | csi: 40 | provisionerReplicas: "{{ rook_ceph.rook_cluster_size }}" 41 | wait: true 42 | 43 | - name: Trigger rook template 44 | ansible.builtin.template: 45 | src: templates/rook-values.yml.j2 46 | dest: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/rook-values.yaml 47 | mode: "0755" 48 | 49 | - name: Ensure rook-ceph-cluster helm chart is installed 50 | kubernetes.core.helm: 51 | name: rook-ceph-cluster 52 | chart_ref: "{{ rook.cluster.chart.ref }}" 53 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 54 | release_namespace: rook-ceph 55 | update_repo_cache: true 56 | values_files: 57 | - /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/rook-values.yaml 58 | wait: true 59 | -------------------------------------------------------------------------------- 
/13_ignite_control_plane.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install cluster with kubeadm 3 | vars_files: 4 | - vars/k8s_cluster.yml 5 | hosts: masters[0] 6 | tasks: 7 | - name: Perform k8s setup 8 | when: k8s_installed is not defined 9 | block: 10 | - name: Start kubeadm install # noqa no-changed-when # noqa command-instead-of-shell 11 | ansible.builtin.shell: kubeadm init --config /tmp/kubeadm-config.yaml --upload-certs 12 | become: true 13 | 14 | - name: Get information on generated certificate 15 | community.crypto.x509_certificate_info: 16 | path: /etc/kubernetes/pki/ca.crt 17 | register: cert 18 | become: true 19 | 20 | - name: Set fact for certificate hash 21 | ansible.builtin.set_fact: 22 | kubehash: "sha256:{{ cert.public_key_fingerprints.sha256 | replace(':', '') }}" 23 | 24 | - name: Create kube directory 25 | ansible.builtin.file: 26 | path: /home/kube/.kube 27 | state: directory 28 | mode: "0755" 29 | 30 | - name: Copy kubeconfig 31 | ansible.builtin.copy: 32 | src: /etc/kubernetes/admin.conf 33 | dest: /home/kube/.kube/config 34 | remote_src: true 35 | owner: "{{ ansible_user_id }}" 36 | group: "{{ ansible_user_id }}" 37 | mode: "0755" 38 | become: true 39 | 40 | - name: Fetch kubeconfig file 41 | ansible.builtin.slurp: 42 | src: /etc/kubernetes/admin.conf 43 | register: kube_master 44 | become: true 45 | 46 | - name: Set kubeconfig as fact 47 | ansible.builtin.set_fact: 48 | kubeconfig: "{{ kube_master['content'] | b64decode }}" 49 | 50 | - name: Set a flag if cluster setup is successful 51 | ansible.builtin.set_fact: 52 | k8s_installed: true 53 | 54 | - name: Verify cluster has been initialized 55 | vars_files: 56 | - vars/k8s_cluster.yml 57 | hosts: vm_host 58 | tasks: 59 | - name: Save kubeconfig as file 60 | ansible.builtin.copy: 61 | content: "{{ hostvars[groups['masters'][0]]['kubeconfig'] }}" 62 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 63 | mode: "0755" 64 | 65 | - name: Wait for control-plane pods to be up and running 66 | kubernetes.core.k8s: 67 | state: present 68 | api_version: v1 69 | kind: Pod 70 | namespace: kube-system 71 | label_selectors: tier = control-plane 72 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 73 | wait: true 74 | when: hostvars[groups['masters'][0]].k8s_installed is not defined 75 | -------------------------------------------------------------------------------- /03_provision_libvirt_resources.yml: -------------------------------------------------------------------------------- 1 | - name: This play provisions libvirt resources with terraform 2 | hosts: vm_host 3 | become: true 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: SELinux fix for running images in different folders 8 | when: (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version | int >= 18) 9 | block: 10 | - name: Ensure security_driver is disabled 11 | ansible.builtin.lineinfile: 12 | line: 'security_driver = "none"' 13 | path: /etc/libvirt/qemu.conf 14 | state: present 15 | 16 | - name: Restart libvirtd service 17 | ansible.builtin.service: 18 | name: libvirtd 19 | state: restarted 20 | 21 | - name: Take care of systemd-resolved on F33 and Ubuntu hosts 22 | when: 23 | (ansible_distribution == 'Fedora' and ansible_distribution_major_version | int > 33) or 24 | (ansible_distribution == 'Ubuntu' and 
ansible_distribution_major_version | int > 18) 25 | block: 26 | - name: Ensure systemd-resolved config dir is present 27 | ansible.builtin.file: 28 | path: /etc/systemd/resolved.conf.d/ 29 | state: directory 30 | mode: "0755" 31 | 32 | - name: Enable localdns if systemd-resolved is present 33 | ansible.builtin.template: 34 | src: systemd-resolved.j2 35 | dest: /etc/systemd/resolved.conf.d/{{ k8s.cluster_name | default('k8s-test', true) }}-local-kube.conf 36 | mode: "0755" 37 | notify: 38 | - Restart systemd-resolved 39 | 40 | - name: Ensure NM configuration directory exists 41 | ansible.builtin.file: 42 | path: /etc/NetworkManager/conf.d 43 | state: directory 44 | mode: "0755" 45 | 46 | - name: Ensure NM dnsmasq directory exists 47 | ansible.builtin.file: 48 | path: /etc/NetworkManager/dnsmasq.d 49 | state: directory 50 | mode: "0755" 51 | 52 | - name: Configure NetworkManager for local DNS 53 | ansible.builtin.copy: 54 | src: files/localdns.conf 55 | dest: /etc/NetworkManager/conf.d/{{ k8s.cluster_name | default('k8s-test', true) }}-localdns.conf 56 | mode: "0755" 57 | notify: 58 | - Restart NetworkManager 59 | 60 | - name: Configure NetworkManager for libvirt network 61 | ansible.builtin.template: 62 | src: templates/libvirt_dnsmasq.j2 63 | dest: /etc/NetworkManager/dnsmasq.d/{{ k8s.cluster_name | default('k8s-test', true) }}-libvirt_dnsmasq.conf 64 | mode: "0755" 65 | notify: 66 | - Restart NetworkManager 67 | 68 | handlers: 69 | - name: Restart systemd-resolved 70 | ansible.builtin.service: 71 | name: systemd-resolved 72 | state: restarted 73 | enabled: true 74 | 75 | - name: Restart NetworkManager 76 | ansible.builtin.service: 77 | name: NetworkManager 78 | state: restarted 79 | -------------------------------------------------------------------------------- /00_pre_flight_checklist.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Pre-flight checklist before installing k8s 3 | hosts: vm_host 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: Check if kubernetes version is supported by the provisioner 8 | ansible.builtin.fail: 9 | msg: The chosen version is not supported; only versions >= 1.32 are supported 10 | when: k8s.cluster_version is version('1.32', '<') 11 | 12 | - name: Check if distribution is supported 13 | ansible.builtin.fail: 14 | msg: Your distribution is currently unsupported. Supported values are 'Fedora', 'CentOS', 'Ubuntu' 15 | when: 16 | - ansible_distribution != 'CentOS' 17 | - ansible_distribution != 'Fedora' 18 | - ansible_distribution != 'Ubuntu' 19 | 20 | - name: Check if target distribution is correct 21 | ansible.builtin.fail: 22 | msg: Target distribution is not supported.
Supported values are 'Ubuntu', 'CentOS' 23 | when: 24 | - k8s.cluster_os != 'CentOS' 25 | - k8s.cluster_os != 'Ubuntu' 26 | 27 | # - name: Check if cri-o is selected on 1.29 28 | # ansible.builtin.fail: 29 | # msg: Please select a different container runtime as cri-o packages are not yet available for {{ k8s.cluster_version }} and {{ k8s.cluster_os }} 30 | # when: 31 | # - k8s.container_runtime == 'crio' 32 | # - k8s.cluster_version is version('1.29', '==') 33 | # # - k8s.cluster_os == 'CentOS' 34 | 35 | - name: Check that at least one control plane VM is defined 36 | ansible.builtin.fail: 37 | msg: At least one control plane VM must be defined 38 | when: 39 | - k8s.control_plane.vms is not defined or k8s.control_plane.vms <= 0 40 | 41 | - name: Check that masters are schedulable when the cluster has only control plane VMs 42 | ansible.builtin.fail: 43 | msg: When provisioning a cluster without workers, you need to set k8s.master_schedulable to true in vars. 44 | when: 45 | - k8s.worker_nodes.vms is not defined or k8s.worker_nodes.vms <= 0 46 | - not k8s.master_schedulable 47 | 48 | - name: Fail fast if Rook cluster size exceeds the number of worker nodes 49 | ansible.builtin.fail: 50 | msg: You can't have fewer worker nodes than the Rook cluster size 51 | when: 52 | - k8s.worker_nodes.vms < rook_ceph.rook_cluster_size 53 | - rook_ceph.install_rook 54 | 55 | - name: Fail fast if no container runtime is defined 56 | ansible.builtin.fail: 57 | msg: Select a container runtime, either 'containerd' or 'crio' 58 | when: 59 | - k8s.container_runtime != 'crio' 60 | - k8s.container_runtime != 'containerd' 61 | 62 | - name: Fail fast if no ingress controller is selected 63 | ansible.builtin.fail: 64 | msg: If you want to install an ingress controller, select one among 'nginx', 'haproxy' or 'contour' 65 | when: 66 | - ingress_controller.install_ingress_controller 67 | - ingress_controller.type != 'nginx' and ingress_controller.type != 'haproxy' and ingress_controller.type != 'contour' 68 | 69 | - name: Fail fast if no valid CNI plugin is selected 70 | ansible.builtin.fail: 71 | msg: Select a CNI plugin among 'calico', 'flannel' or 'cilium' 72 | when: 73 | - k8s.network.cni_plugin != 'calico' 74 | - k8s.network.cni_plugin != 'flannel' 75 | - k8s.network.cni_plugin != 'cilium' 76 | -------------------------------------------------------------------------------- /11_install_kube_packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure kube packages are installed 3 | hosts: k8s_nodes 4 | become: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Add Kubernetes repository 9 | ansible.builtin.yum_repository: 10 | name: kubernetes 11 | description: Kubernetes repo 12 | baseurl: "{{ kubernetes.centos.k8s_repo }}" 13 | gpgcheck: true 14 | repo_gpgcheck: true 15 | gpgkey: "{{ kubernetes.centos.k8s_repo_key }}" 16 | exclude: kubelet kubeadm kubectl 17 | when: k8s.cluster_os == 'CentOS' 18 | 19 | - name: Ensure required packages for kubetools are installed 20 | ansible.builtin.apt: 21 | name: 22 | - apt-transport-https 23 | - curl 24 | state: present 25 | when: k8s.cluster_os == 'Ubuntu' 26 | 27 | - name: Add kube-repo key 28 | ansible.builtin.apt_key: 29 | url: "{{ kubernetes.ubuntu.k8s_repo_key }}" 30 | keyring: "{{ kubernetes.ubuntu.k8s_repo_keyring }}" 31 | state: present 32 | when: k8s.cluster_os == 'Ubuntu' 33 | 34 | - name: Ensure the presence of apt-repo for kubernetes packages 35 | ansible.builtin.apt_repository: 36 | repo: "{{
kubernetes.ubuntu.k8s_repo }}" 37 | filename: "{{ kubernetes.ubuntu.k8s_repo_file }}" 38 | state: present 39 | when: k8s.cluster_os == 'Ubuntu' 40 | 41 | - name: Ensure Kubernetes packages are installed 42 | ansible.builtin.apt: 43 | name: "{{ kubernetes.packages.k8s_packages }}" 44 | state: present 45 | when: k8s.cluster_os == 'Ubuntu' 46 | 47 | - name: Ensure kubelet, kubeadm, kubectl are on hold 48 | ansible.builtin.dpkg_selections: 49 | name: "{{ item }}" 50 | selection: hold 51 | loop: 52 | - kubectl 53 | - kubeadm 54 | - kubelet 55 | when: k8s.cluster_os == 'Ubuntu' 56 | 57 | - name: Disable swap # noqa no-changed-when 58 | ansible.builtin.command: swapoff -a 59 | 60 | - name: Remove swap entry from fstab 61 | ansible.builtin.lineinfile: 62 | line: "/dev/mapper/cl-swap swap" 63 | path: /etc/fstab 64 | state: absent 65 | 66 | - name: Disable SELinux 67 | ansible.posix.selinux: 68 | state: disabled 69 | register: selinux_output 70 | notify: Reboot host 71 | when: k8s.cluster_os == 'CentOS' 72 | 73 | - name: Install kubepackages 74 | ansible.builtin.yum: 75 | name: "{{ kubernetes.packages.k8s_packages }}" 76 | state: present 77 | disable_excludes: kubernetes 78 | when: k8s.cluster_os == 'CentOS' 79 | 80 | - name: Install lvm2 for rook support and git for installing rook 81 | ansible.builtin.yum: 82 | name: 83 | - lvm2 84 | - git 85 | when: 86 | - rook_ceph.install_rook 87 | - k8s.cluster_os == 'CentOS' 88 | 89 | - name: Install lvm2 for rook support and git for installing rook 90 | ansible.builtin.apt: 91 | name: 92 | - lvm2 93 | - git 94 | when: 95 | - rook_ceph.install_rook 96 | - k8s.cluster_os == 'Ubuntu' 97 | 98 | - name: Enable kubelet 99 | ansible.builtin.systemd: 100 | name: kubelet 101 | state: started 102 | enabled: true 103 | notify: Reboot host 104 | 105 | handlers: 106 | - name: Reboot host 107 | ansible.builtin.reboot: 108 | -------------------------------------------------------------------------------- /templates/rook-values.yml.j2: -------------------------------------------------------------------------------- 1 | cephClusterSpec: 2 | cephVersion: 3 | image: quay.io/ceph/ceph:v19 4 | allowUnsupported: false 5 | mon: 6 | count: {{ rook_ceph.rook_cluster_size | int }} 7 | allowMultiplePerNode: false 8 | mgr: 9 | count: {{ rook_ceph.rook_cluster_size | int }} 10 | allowMultiplePerNode: false 11 | configOverride: | 12 | [global] 13 | mon_warn_on_pool_no_redundancy = false 14 | bdev_flock_retry = 20 15 | bluefs_buffered_io = false 16 | mon_allow_pool_delete = true 17 | osd_pool_default_size = {{ rook_ceph.rook_cluster_size | int }} 18 | osd_pool_default_min_size = {{ rook_ceph.rook_cluster_size | int }} 19 | cephBlockPools: 20 | - name: ceph-blockpool 21 | spec: 22 | failureDomain: host 23 | replicated: 24 | size: {{ rook_ceph.rook_cluster_size | int }} 25 | storageClass: 26 | enabled: true 27 | name: ceph-block 28 | isDefault: true 29 | reclaimPolicy: Delete 30 | allowVolumeExpansion: true 31 | parameters: 32 | csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner 33 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph 34 | csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner 35 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph 36 | csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node 37 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph 38 | cephFileSystems: 39 | - name: ceph-filesystem 40 | spec: 41 | metadataPool: 42 | replicated: 43 | size: {{ rook_ceph.rook_cluster_size | int }} 44 | 
dataPools: 45 | - failureDomain: host 46 | replicated: 47 | size: {{ rook_ceph.rook_cluster_size | int }} 48 | name: data0 49 | metadataServer: 50 | activeCount: {{ rook_ceph.rook_cluster_size | int }} 51 | activeStandby: false 52 | resources: 53 | limits: 54 | cpu: "2000m" 55 | memory: "4Gi" 56 | requests: 57 | cpu: "1000m" 58 | memory: "512Mi" 59 | priorityClassName: system-cluster-critical 60 | storageClass: 61 | enabled: true 62 | isDefault: true 63 | name: ceph-filesystem 64 | pool: data0 65 | reclaimPolicy: Delete 66 | parameters: 67 | csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner 68 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph 69 | csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner 70 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph 71 | csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node 72 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph 73 | cephObjectStores: 74 | - name: ceph-objectstore 75 | spec: 76 | metadataPool: 77 | failureDomain: host 78 | replicated: 79 | size: {{ rook_ceph.rook_cluster_size | int }} 80 | dataPool: 81 | failureDomain: host 82 | erasureCoded: 83 | dataChunks: 2 84 | codingChunks: 1 85 | preservePoolsOnDelete: true 86 | gateway: 87 | port: 80 88 | resources: 89 | limits: 90 | cpu: "2000m" 91 | memory: "2Gi" 92 | requests: 93 | cpu: "1000m" 94 | memory: "1Gi" 95 | instances: 1 96 | priorityClassName: system-cluster-critical 97 | storageClass: 98 | enabled: true 99 | name: ceph-bucket 100 | reclaimPolicy: Delete -------------------------------------------------------------------------------- /group_vars/vm_host/vars.yml: -------------------------------------------------------------------------------- 1 | ################################## 2 | ## Infra related ## 3 | ################################## 4 | workspace_directory: 5 | base_path: "{{ home_dir }}/k8s-setup" 6 | 7 | terraform_url: https://releases.hashicorp.com/terraform/1.14.3/terraform_1.14.3_linux_amd64.zip 8 | image_name: OS-GenericCloud 9 | centos: 10 | cloud_image: https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2 11 | ubuntu_jammy: 12 | cloud_image: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 13 | 14 | libvirt: 15 | storage: 16 | pool_path: /var/lib/libvirt/images 17 | 18 | os_packages: 19 | centos: 20 | - edk2-ovmf 21 | - gcc 22 | - git 23 | - libvirt 24 | - make 25 | - python3-libvirt 26 | - python3-lxml 27 | - python3-netaddr 28 | - python3-kubernetes 29 | - python3-jsonpatch 30 | - python3-netaddr 31 | - python3-pip 32 | - python3-yaml 33 | - python3-venv 34 | - qemu-kvm 35 | - unzip 36 | - virt-install 37 | - virt-manager 38 | 39 | fedora: 40 | - edk2-ovmf 41 | - gcc 42 | - git 43 | - libvirt 44 | - libvirt-devel 45 | - make 46 | - python3-libvirt 47 | - python3-lxml 48 | - python3-netaddr 49 | - python3-kubernetes 50 | - python3-jsonpatch 51 | - python3-netaddr 52 | - python3-pip 53 | - python3-yaml 54 | - python3-venv 55 | - qemu-kvm 56 | - unzip 57 | - virt-install 58 | - virt-manager 59 | 60 | ubuntu: 61 | - bridge-utils 62 | - gcc 63 | - genisoimage 64 | - git 65 | - libvirt-clients 66 | - libvirt-daemon-system 67 | - libvirt-dev 68 | - make 69 | - network-manager 70 | - ovmf 71 | - python3-libvirt 72 | - python3-lxml 73 | - python3-netaddr 74 | - python3-kubernetes 75 | - python3-jsonpatch 76 | - python3-netaddr 77 | - python3-pip 78 | - python3-yaml 79 | - python3-venv 80 | - qemu-kvm 81 
| - unzip 82 | - virt-manager 83 | - virtinst 84 | - xsltproc 85 | 86 | 87 | ################################## 88 | ## Kubernetes related ## 89 | ################################## 90 | 91 | cni_plugins: 92 | calico: 93 | calico_operator: https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/tigera-operator.yaml 94 | calico_crd: https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/custom-resources.yaml 95 | flannel: 96 | flannel_repo: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 97 | cilium: 98 | chart: 99 | name: cilium 100 | ref: cilium/cilium 101 | url: https://helm.cilium.io/ 102 | 103 | ingress: 104 | nginx: 105 | chart: 106 | name: ingress-nginx 107 | url: https://kubernetes.github.io/ingress-nginx 108 | ref: ingress-nginx/ingress-nginx 109 | contour: 110 | chart: 111 | name: bitnami 112 | url: https://charts.bitnami.com/bitnami 113 | ref: bitnami/contour 114 | haproxy: 115 | chart: 116 | name: haproxytech 117 | url: https://haproxytech.github.io/helm-charts 118 | ref: haproxytech/kubernetes-ingress 119 | 120 | helm: 121 | helm_installer: https://get.helm.sh/helm-v4.0.0-linux-amd64.tar.gz 122 | 123 | rook: 124 | operator: 125 | chart: 126 | name: rook-release 127 | url: https://charts.rook.io/release 128 | ref: rook-release/rook-ceph 129 | cluster: 130 | chart: 131 | name: rook-release 132 | url: https://charts.rook.io/release 133 | ref: rook-release/rook-ceph-cluster 134 | 135 | metallb_setup: 136 | manifest_url: https://raw.githubusercontent.com/metallb/metallb/v0.15.2/config/manifests 137 | -------------------------------------------------------------------------------- /02_prepare_setup.yml: -------------------------------------------------------------------------------- 1 | - name: This play ensures environment is set up for cluster creation 2 | hosts: vm_host 3 | vars_files: 4 | - vars/k8s_cluster.yml 5 | tasks: 6 | - name: Set user home as fact 7 | ansible.builtin.set_fact: 8 | home_dir: "{{ ansible_env.HOME }}" 9 | 10 | - name: Ensure workspace directory exists 11 | ansible.builtin.file: 12 | path: "{{ workspace_directory.base_path }}" 13 | state: directory 14 | mode: "0755" 15 | 16 | - name: Ensure cluster folder exists 17 | ansible.builtin.file: 18 | path: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}" 19 | state: directory 20 | mode: "0755" 21 | 22 | - name: Ensure pivot tmp folder exists 23 | ansible.builtin.file: 24 | path: "/tmp/{{ k8s.cluster_name | default('k8s-test', true) }}" 25 | state: directory 26 | mode: "0755" 27 | 28 | - name: Populate cluster folder with terraform files 29 | ansible.builtin.copy: 30 | src: "files/terraform/" 31 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}" 32 | mode: "0755" 33 | 34 | - name: Snapshot cluster configuration for further use 35 | ansible.builtin.copy: 36 | src: "vars" 37 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/" 38 | mode: "0755" 39 | 40 | - name: Ensure helm is installed 41 | ansible.builtin.unarchive: 42 | src: "{{ helm.helm_installer }}" 43 | dest: /tmp/ 44 | remote_src: true 45 | 46 | - name: Install helm in PATH 47 | ansible.builtin.copy: 48 | src: /tmp/linux-amd64/helm 49 | dest: /usr/bin/ 50 | remote_src: true 51 | mode: +x 52 | become: true 53 | 54 | - name: Install Helm Diff 55 | kubernetes.core.helm_plugin: 56 | state: present 57 | plugin_path: 
https://github.com/databus23/helm-diff 58 | 59 | - name: Remove directory 60 | ansible.builtin.file: 61 | path: /tmp/linux-amd64 62 | state: absent 63 | 64 | - name: Create ssh keypair 65 | community.crypto.openssh_keypair: 66 | path: "{{ playbook_dir }}/id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}" 67 | delegate_to: localhost 68 | 69 | - name: Copy SSH keys in working directory 70 | ansible.builtin.copy: 71 | src: "{{ playbook_dir }}/{{ item }}" 72 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/{{ item }}" 73 | mode: "0755" 74 | loop: 75 | - id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }} 76 | - id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}.pub 77 | 78 | - name: Getting ssh private key 79 | ansible.builtin.slurp: 80 | src: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}" # noqa yaml[line-length] 81 | register: k8s_key 82 | 83 | - name: Getting ssh public key 84 | ansible.builtin.slurp: 85 | src: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}.pub" # noqa yaml[line-length] 86 | register: k8s_key_pub 87 | 88 | - name: Set SSH keys as fact 89 | ansible.builtin.set_fact: 90 | k8s_key: "{{ k8s_key['content'] | b64decode }}" 91 | k8s_key_pub: "{{ k8s_key_pub['content'] | b64decode }}" 92 | 93 | - name: Download CentOS image 94 | ansible.builtin.get_url: 95 | url: "{{ centos.cloud_image }}" 96 | dest: /tmp/{{ image_name }}.qcow2 97 | mode: "0777" 98 | when: k8s.cluster_os == 'CentOS' 99 | 100 | - name: Download Ubuntu image 101 | ansible.builtin.get_url: 102 | url: "{{ ubuntu_jammy.cloud_image }}" 103 | dest: /tmp/{{ image_name }}.qcow2 104 | mode: "0777" 105 | when: k8s.cluster_os == 'Ubuntu' 106 | -------------------------------------------------------------------------------- /22_apply_network_plugin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Apply network plugin 3 | hosts: vm_host 4 | run_once: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Configure Calico 9 | when: k8s.network.cni_plugin == 'calico' 10 | block: 11 | - name: Download Calico manifest. 12 | ansible.builtin.get_url: 13 | url: "{{ item.url }}" 14 | dest: "{{ item.name }}" 15 | mode: "0664" 16 | loop: 17 | - name: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/calico-operator.yaml 18 | url: "{{ cni_plugins.calico.calico_operator }}" 19 | - name: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/calico-crd.yaml 20 | url: "{{ cni_plugins.calico.calico_crd }}" 21 | 22 | - name: Apply custom CIDR to calico installation manifest 23 | ansible.builtin.replace: 24 | path: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/calico-crd.yaml 25 | regexp: 192.168.0.0\/16 26 | replace: "{{ k8s.network.pod_cidr }}" 27 | 28 | # - name: Temporary fix for non ascii char in Calico CRD (https://github.com/projectcalico/api/pull/46) 29 | # ansible.builtin.replace: 30 | # path: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/calico-operator.yaml 31 | # regexp: \’ 32 | # replace: "" 33 | 34 | - name: Apply calico manifests to the cluster. 
35 | kubernetes.core.k8s: 36 | state: present 37 | src: "{{ item }}" 38 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 39 | wait: true 40 | loop: 41 | - /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/calico-operator.yaml 42 | - /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/calico-crd.yaml 43 | 44 | - name: Configure Cilium 45 | when: k8s.network.cni_plugin == 'cilium' 46 | block: 47 | - name: Add helm chart repository for Cilium 48 | kubernetes.core.helm_repository: 49 | name: "{{ item.name }}" 50 | repo_url: "{{ item.repo_url }}" 51 | loop: 52 | - name: "{{ cni_plugins.cilium.chart.name }}" 53 | repo_url: "{{ cni_plugins.cilium.chart.url }}" 54 | 55 | - name: Ensure Cilium helm chart is installed 56 | kubernetes.core.helm: 57 | name: cilium 58 | chart_ref: "{{ cni_plugins.cilium.chart.ref }}" 59 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 60 | release_namespace: kube-system 61 | update_repo_cache: true 62 | values: 63 | ipam: 64 | mode: kubernetes 65 | wait: true 66 | 67 | - name: Configure flannel 68 | when: k8s.network.cni_plugin == 'flannel' 69 | block: 70 | - name: Download flannel manifest 71 | ansible.builtin.get_url: 72 | url: "{{ cni_plugins.flannel.flannel_repo }}" 73 | dest: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/kube-flannel.yaml 74 | mode: "0755" 75 | 76 | - name: Patch kube-flannel to use host-gw instead of vxlan 77 | ansible.builtin.replace: 78 | path: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/kube-flannel.yaml 79 | regexp: "vxlan" 80 | replace: "host-gw" 81 | 82 | - name: Apply flannel manifests to the cluster. 83 | kubernetes.core.k8s: 84 | state: present 85 | src: /tmp/{{ k8s.cluster_name | default('k8s-test', true) }}/kube-flannel.yaml 86 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 87 | wait: true 88 | 89 | - name: Wait for core-dns pods to be up and running 90 | kubernetes.core.k8s: 91 | state: present 92 | api_version: v1 93 | kind: Deployment 94 | namespace: kube-system 95 | name: coredns 96 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 97 | wait: true 98 | -------------------------------------------------------------------------------- /08_loadbalancer_services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Loadbalancer configuration play 3 | hosts: loadbalancer 4 | become: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Set default network manager 9 | ansible.builtin.set_fact: 10 | network_manager: "{{ 'NetworkManager' if k8s.cluster_os == 'CentOS' else 'systemd-networkd' }}" 11 | 12 | - name: Manage loadbalancer configuration 13 | when: k8s.control_plane.vms > 1 or (k8s.worker_nodes.vms > 1 and ingress_controller.install_ingress_controller) 14 | block: 15 | - name: Ensure required packages are present 16 | when: k8s.cluster_os == "CentOS" 17 | block: 18 | - name: Upgrade all packages 19 | ansible.builtin.dnf: 20 | name: "*" 21 | state: latest # noqa package-latest 22 | 23 | - name: Install needed packages 24 | ansible.builtin.dnf: 25 | name: 26 | - firewalld 27 | - haproxy 28 | state: present 29 | 30 | - name: Ensure required packages are present 31 | when: k8s.cluster_os == "Ubuntu" 32 | block: 33 | - 
name: Upgrade all packages 34 | ansible.builtin.apt: 35 | name: "*" 36 | state: latest # noqa package-latest 37 | 38 | - name: Install needed packages 39 | ansible.builtin.apt: 40 | name: 41 | - firewalld 42 | - haproxy 43 | state: present 44 | 45 | - name: Ensure firewalld, haproxy and NM are enabled 46 | ansible.builtin.service: 47 | name: "{{ item }}" 48 | enabled: true 49 | state: started 50 | loop: 51 | - firewalld 52 | - haproxy 53 | - "{{ network_manager }}" 54 | 55 | - name: Firewall settings for Ubuntu 56 | when: k8s.cluster_os == 'Ubuntu' 57 | block: 58 | - name: Stop NetworkManager 59 | ansible.builtin.service: 60 | name: "{{ network_manager }}" 61 | state: stopped 62 | 63 | - name: Adding interface to firewall internal zone 64 | ansible.posix.firewalld: 65 | zone: internal 66 | interface: "{{ ansible_default_ipv4.interface }}" 67 | permanent: true 68 | state: enabled 69 | 70 | - name: Start NetworkManager 71 | ansible.builtin.service: 72 | name: "{{ network_manager }}" 73 | state: restarted 74 | enabled: true 75 | 76 | - name: Firewall settings for CentOS 77 | when: k8s.cluster_os == 'CentOS' 78 | block: 79 | - name: Adding interface to internal zone # noqa no-changed-when 80 | ansible.builtin.command: nmcli con mod "System eth0" connection.zone internal 81 | 82 | - name: Refreshing interface # noqa no-changed-when command-instead-of-shell 83 | ansible.builtin.shell: nmcli con down "System eth0"; nmcli con up "System eth0" 84 | 85 | - name: Restart NetworkManager 86 | ansible.builtin.service: 87 | name: "{{ network_manager }}" 88 | state: restarted 89 | 90 | - name: Allow service for internal zone 91 | ansible.posix.firewalld: 92 | zone: internal 93 | state: enabled 94 | permanent: true 95 | service: "{{ item }}" 96 | loop: 97 | - http 98 | - https 99 | 100 | - name: Allow ports for internal zone 101 | ansible.posix.firewalld: 102 | zone: internal 103 | state: enabled 104 | permanent: true 105 | port: "6443/tcp" 106 | 107 | - name: Reload firewalld service 108 | ansible.builtin.service: 109 | name: firewalld 110 | state: restarted 111 | 112 | - name: Enabling selinux boolean for haproxy 113 | ansible.posix.seboolean: 114 | name: haproxy_connect_any 115 | state: true 116 | persistent: true 117 | when: k8s.cluster_os == 'CentOS' 118 | 119 | - name: Firing haproxy template 120 | ansible.builtin.template: 121 | src: templates/haproxy.j2 122 | dest: /etc/haproxy/haproxy.cfg 123 | mode: "0755" 124 | 125 | - name: Reload haproxy service 126 | ansible.builtin.service: 127 | name: haproxy 128 | state: restarted 129 | enabled: true 130 | 131 | - name: Reboot loadbalancer 132 | ansible.builtin.reboot: 133 | -------------------------------------------------------------------------------- /files/terraform/main.tf: -------------------------------------------------------------------------------- 1 | provider "libvirt" { 2 | uri = "qemu:///system" 3 | } 4 | 5 | module "libvirt_pool" { 6 | source = "kubealex/libvirt-resources/libvirt//modules/terraform-libvirt-pool" 7 | 8 | pool_name = var.pool_name 9 | } 10 | 11 | module "libvirt_network" { 12 | source = "kubealex/libvirt-resources/libvirt//modules/terraform-libvirt-network" 13 | 14 | network_name = var.network_name 15 | network_domain = var.network_domain 16 | network_cidr = var.network_cidr 17 | network_dhcp_enabled = var.network_dhcp_enabled 18 | network_dns_enabled = var.network_dns_enabled 19 | network_dns_local = var.network_dns_local 20 | network_dnsmasq_options = var.network_dnsmasq_options 21 | } 22 | 23 | 24 | module "master_nodes" { 
25 | source = "kubealex/libvirt-resources/libvirt//modules/terraform-libvirt-instance" 26 | 27 | depends_on = [ module.libvirt_network, module.libvirt_pool ] 28 | instance_network_interfaces = var.master_instance_network_interfaces 29 | instance_volume_size = var.master_instance_volume_size 30 | instance_libvirt_pool = var.master_instance_libvirt_pool 31 | instance_cloud_image = var.master_instance_cloud_image 32 | instance_hostname = var.master_instance_hostname 33 | instance_domain = var.master_instance_domain 34 | instance_memory = var.master_instance_memory 35 | instance_cpu = var.master_instance_cpu 36 | instance_count = var.master_instance_count 37 | instance_cloud_user = var.master_instance_cloud_user 38 | instance_uefi_enabled = var.master_instance_uefi_enabled 39 | instance_firmware = var.os_firmware 40 | } 41 | 42 | module "worker_nodes" { 43 | source = "kubealex/libvirt-resources/libvirt//modules/terraform-libvirt-instance" 44 | 45 | depends_on = [ module.libvirt_network, module.libvirt_pool ] 46 | instance_libvirt_pool = var.worker_instance_libvirt_pool 47 | instance_network_interfaces = var.worker_instance_network_interfaces 48 | instance_cloud_image = var.worker_instance_cloud_image 49 | instance_volume_size = var.worker_instance_volume_size 50 | instance_hostname = var.worker_instance_hostname 51 | instance_domain = var.worker_instance_domain 52 | instance_memory = var.worker_instance_memory 53 | instance_cpu = var.worker_instance_cpu 54 | instance_count = var.worker_instance_count 55 | instance_cloud_user = var.worker_instance_cloud_user 56 | instance_uefi_enabled = var.worker_instance_uefi_enabled 57 | instance_firmware = var.os_firmware 58 | } 59 | 60 | module "worker_nodes_rook" { 61 | source = "kubealex/libvirt-resources/libvirt//modules/terraform-libvirt-instance" 62 | 63 | count = var.worker_rook_enabled ? 1 : 0 64 | depends_on = [ module.libvirt_network, module.libvirt_pool ] 65 | instance_additional_volume_size = var.worker_rook_instance_additional_volume_size 66 | instance_volume_size = var.worker_rook_instance_volume_size 67 | instance_network_interfaces = var.worker_rook_instance_network_interfaces 68 | instance_libvirt_pool = var.worker_rook_instance_libvirt_pool 69 | instance_cloud_image = var.worker_rook_instance_cloud_image 70 | instance_hostname = var.worker_rook_instance_hostname 71 | instance_domain = var.worker_rook_instance_domain 72 | instance_memory = var.worker_rook_instance_memory 73 | instance_cpu = var.worker_rook_instance_cpu 74 | instance_count = var.worker_rook_instance_count 75 | instance_cloud_user = var.worker_rook_instance_cloud_user 76 | instance_uefi_enabled = var.worker_rook_instance_uefi_enabled 77 | instance_firmware = var.os_firmware 78 | } 79 | 80 | module "loadbalancer" { 81 | source = "kubealex/libvirt-resources/libvirt//modules/terraform-libvirt-instance" 82 | 83 | count = var.loadbalancer_enabled ? 
1 : 0 84 | depends_on = [ module.libvirt_network, module.libvirt_pool ] 85 | instance_network_interfaces = var.loadbalancer_instance_network_interfaces 86 | instance_additional_volume_size = var.loadbalancer_instance_additional_volume_size 87 | instance_libvirt_pool = var.loadbalancer_instance_libvirt_pool 88 | instance_cloud_image = var.loadbalancer_instance_cloud_image 89 | instance_hostname = var.loadbalancer_instance_hostname 90 | instance_domain = var.loadbalancer_instance_domain 91 | instance_memory = var.loadbalancer_instance_memory 92 | instance_cpu = var.loadbalancer_instance_cpu 93 | instance_count = var.loadbalancer_instance_count 94 | instance_cloud_user = var.loadbalancer_instance_cloud_user 95 | instance_uefi_enabled = var.loadbalancer_instance_uefi_enabled 96 | instance_firmware = var.os_firmware 97 | } 98 | 99 | 100 | terraform { 101 | required_version = ">= 1.0" 102 | required_providers { 103 | libvirt = { 104 | source = "dmacvicar/libvirt" 105 | version = "0.8.3" 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /30_install_ingress_controller.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare cluster to install ingress controller 3 | hosts: vm_host 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | run_once: true 7 | tasks: 8 | - name: Configure Ingress controller 9 | when: ingress_controller.install_ingress_controller 10 | block: 11 | - name: Take all required steps to install contour as ingress controller 12 | when: 13 | - ingress_controller.type == 'contour' 14 | block: 15 | - name: Ensure the needed Namespaces exist. 16 | kubernetes.core.k8s: 17 | api_version: v1 18 | kind: Namespace 19 | name: projectcontour 20 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 21 | state: present 22 | 23 | - name: Add helm chart repository for Contour 24 | kubernetes.core.helm_repository: 25 | name: "{{ item.name }}" 26 | repo_url: "{{ item.repo_url }}" 27 | loop: 28 | - name: "{{ ingress.contour.chart.name }}" 29 | repo_url: "{{ ingress.contour.chart.url }}" 30 | 31 | - name: Ensure Contour helm chart is installed 32 | kubernetes.core.helm: 33 | name: contour 34 | chart_ref: "{{ ingress.contour.chart.ref }}" 35 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 36 | release_namespace: projectcontour 37 | update_repo_cache: true 38 | values: 39 | contour: 40 | ingressClass: "" 41 | replicaCount: 1 42 | defaultBackend: 43 | enabled: true 44 | envoy: 45 | kind: deployment 46 | useHostPort: false 47 | service: 48 | externalTrafficPolicy: Cluster 49 | type: NodePort 50 | nodePorts: 51 | http: "{{ ingress_controller.node_port.http }}" 52 | https: "{{ ingress_controller.node_port.https }}" 53 | wait: true 54 | 55 | - name: Take all required steps to install haproxy as ingress controller 56 | when: 57 | - ingress_controller.type == 'haproxy' 58 | block: 59 | - name: Ensure the needed Namespaces exist. 
60 | kubernetes.core.k8s: 61 | api_version: v1 62 | kind: Namespace 63 | name: haproxy-controller 64 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 65 | state: present 66 | 67 | - name: Add helm chart repository for haproxy 68 | kubernetes.core.helm_repository: 69 | name: "{{ item.name }}" 70 | repo_url: "{{ item.repo_url }}" 71 | loop: 72 | - name: "{{ ingress.haproxy.chart.name }}" 73 | repo_url: "{{ ingress.haproxy.chart.url }}" 74 | 75 | - name: Ensure haproxy helm chart is installed 76 | kubernetes.core.helm: 77 | name: haproxy 78 | chart_ref: "{{ ingress.haproxy.chart.ref }}" 79 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 80 | release_namespace: haproxy-controller 81 | values: 82 | controller: 83 | ingressClass: null 84 | service: 85 | type: NodePort 86 | nodePorts: 87 | http: "{{ ingress_controller.node_port.http }}" 88 | https: "{{ ingress_controller.node_port.https }}" 89 | stat: 31024 90 | kind: Deployment 91 | replicaCount: 1 92 | update_repo_cache: true 93 | wait: true 94 | 95 | - name: Take all required steps to install Nginx as ingress controller 96 | when: 97 | - ingress_controller.type == 'nginx' 98 | block: 99 | - name: Ensure the needed Namespaces exist. 100 | kubernetes.core.k8s: 101 | definition: 102 | api_version: v1 103 | kind: Namespace 104 | metadata: 105 | name: ingress-nginx 106 | labels: 107 | app.kubernetes.io/name: ingress-nginx 108 | app.kubernetes.io/instance: ingress-nginx 109 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 110 | state: present 111 | 112 | - name: Add helm chart repository for Nginx 113 | kubernetes.core.helm_repository: 114 | name: "{{ item.name }}" 115 | repo_url: "{{ item.repo_url }}" 116 | loop: 117 | - name: "{{ ingress.nginx.chart.name }}" 118 | repo_url: "{{ ingress.nginx.chart.url }}" 119 | 120 | - name: Ensure Nginx helm chart is installed 121 | kubernetes.core.helm: 122 | name: nginx 123 | chart_ref: "{{ ingress.nginx.chart.ref }}" 124 | kubeconfig: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/admin.kubeconfig" 125 | release_namespace: ingress-nginx 126 | values: 127 | controller: 128 | service: 129 | type: NodePort 130 | nodePorts: 131 | http: "{{ ingress_controller.node_port.http }}" 132 | https: "{{ ingress_controller.node_port.https }}" 133 | update_repo_cache: true 134 | wait: true 135 | 136 | - name: Trigger new haproxy configuration 137 | hosts: loadbalancer 138 | become: true 139 | vars_files: 140 | - vars/k8s_cluster.yml 141 | tasks: 142 | - name: Configure HAProxy 143 | when: ingress_controller.install_ingress_controller 144 | block: 145 | - name: Fire up new haproxy template 146 | ansible.builtin.template: 147 | src: templates/haproxy.j2 148 | dest: /etc/haproxy/haproxy.cfg 149 | mode: "0755" 150 | 151 | - name: Restart haproxy 152 | ansible.builtin.systemd: 153 | name: haproxy 154 | state: restarted 155 | 156 | - name: Save new haproxy configuration 157 | hosts: vm_host 158 | become: true 159 | vars_files: 160 | - vars/k8s_cluster.yml 161 | tasks: 162 | - name: Save HAProxy configuration 163 | ansible.builtin.template: 164 | src: templates/haproxy.j2 165 | dest: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}/haproxy.cfg" 166 | mode: "0755" 167 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 2 | 3 | # libvirt-k8s-provisioner - Automate your cluster provisioning from 0 to k8s! 4 | 5 | Welcome to the home of the project! 6 | 7 | With this project, you can build a fully working k8s cluster (single master/HA) in minutes, with as many worker nodes as you want. 8 | 9 | # DISCLAIMER 10 | 11 | This is a hobby project, so it's not supported for production usage, but feel free to open issues and/or contribute to it! 12 | 13 | # How does it work? 14 | 15 | The Kubernetes version to install can be chosen among: 16 | 17 | - **1.34** - Latest 1.34 release (1.34.2) 18 | - **1.33** - Latest 1.33 release (1.33.6) 19 | - **1.32** - Latest 1.32 release (1.32.10) 20 | - **1.31** - Latest 1.31 release (1.31.14) 21 | 22 | 23 | Terraform takes care of provisioning: 24 | 25 | - Loadbalancer machine with **haproxy** installed and configured for **HA** clusters 26 | - k8s Master(s) VM(s) 27 | - k8s Worker(s) VM(s) 28 | 29 | It also takes care of preparing the host machine with the needed packages, configuring: 30 | 31 | - dedicated libvirt dnsmasq configuration 32 | - dedicated libvirt network (fully customizable) 33 | - dedicated libvirt storage pool (fully customizable) 34 | - libvirt-terraform-provider (compiled and initialized based on [https://github.com/dmacvicar/terraform-provider-libvirt](https://github.com/dmacvicar/terraform-provider-libvirt)) 35 | 36 | You can customize the setup by choosing: 37 | 38 | - **container runtime** that you want to use (**cri-o, containerd**). 39 | - **schedulable master** if you want to schedule on your master nodes or leave the taint. 40 | - **service CIDR** to be used during installation. 41 | - **pod CIDR** to be used during installation. 42 | - **network plugin** to be used, based on the documentation: **[Project Calico](https://www.projectcalico.org/calico-networking-for-kubernetes/)**, **[Flannel](https://github.com/coreos/flannel)**, **[Project Cilium](https://cilium.io/)** 43 | - **additional SANs** to be added to the api-server 44 | - **[nginx-ingress-controller](https://kubernetes.github.io/ingress-nginx/)**, **[haproxy-ingress-controller](https://github.com/haproxytech/kubernetes-ingress)** or **[Project Contour](https://projectcontour.io/)** if you want to enable ingress management. 45 | - **[metalLB](https://metallb.universe.tf/)** to manage bare-metal LoadBalancer services - **WIP** - Only L2 configuration can be set up via playbook. 46 | - **[Rook-Ceph](https://rook.io/docs/rook/v1.4/ceph-storage.html)** - To manage persistent storage, also configurable with a single storage node. 47 | 48 | ## All VMs are identical, prepared with: 49 | 50 | - OS: 51 | 52 | - Ubuntu 24.04 LTS Cloud base image [https://cloud-images.ubuntu.com/noble/current/](https://cloud-images.ubuntu.com/noble/current/) 53 | - CentOS Stream 9 Generic Cloud base image [https://cloud.centos.org/centos/9-stream/x86_64/images/](https://cloud.centos.org/centos/9-stream/x86_64/images/) 54 | 55 | - cloud-init: 56 | - user: **kube** 57 | - pass: **kuberocks** 58 | - ssh-key: generated during vm provisioning and stored in the project folder 59 | 60 | The user can also log in via SSH.
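For reference, logging into a node with the generated key looks like the sketch below; it assumes the default cluster name (`k8s-test`), the default domain (`k8s.test`), a single control-plane node (with more than one, hosts get a numeric suffix) and that the local DNS configured by the playbooks resolves the cluster domain:

```bash
# Hedged example - cluster name, domain and hostname are the documented defaults;
# adjust them to match your vars/k8s_cluster.yml.
ssh -i id_rsa_k8s-test -o StrictHostKeyChecking=no kube@k8s-test-master.k8s.test
```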
61 | 62 | ## Quickstart 63 | 64 | The playbook is meant to be run against a local host, or a remote host that has access to the subnets that will be created, defined under the **vm_host** group, depending on how many clusters you want to configure at once. 65 | 66 | First of all, you need to install the required collections to get started: 67 | 68 | ```bash 69 | ansible-galaxy collection install -r requirements.yml 70 | ``` 71 | 72 | Once the collections are installed, you can simply run the playbook: 73 | 74 | ```bash 75 | ansible-playbook main.yml 76 | ``` 77 | 78 | You can quickly make it work by configuring the needed vars, but you can also go straight with the defaults! 79 | 80 | You can also install your cluster using the **Makefile**: 81 | 82 | To install collections: 83 | 84 | ```bash 85 | make setup 86 | ``` 87 | 88 | To install the cluster: 89 | 90 | ```bash 91 | make create 92 | ``` 93 | 94 | ## Quickstart with Execution Environment 95 | 96 | The playbooks are compatible with the newly introduced **Execution environments (EE)**. To use them with an execution environment you need to have [ansible-builder](https://ansible-builder.readthedocs.io/en/stable/) and [ansible-navigator](https://ansible-navigator.readthedocs.io/en/latest/) installed. 97 | 98 | ### Build EE image 99 | 100 | To build the EE image, run the build from the project root, pointing at the _execution-environment_ folder: 101 | 102 | ```bash 103 | ansible-builder build -f execution-environment/execution-environment.yml -t k8s-ee 104 | ``` 105 | 106 | ### Run playbooks 107 | 108 | To run the playbooks, use ansible-navigator: 109 | 110 | ```bash 111 | ansible-navigator run main.yml -m stdout 112 | ``` 113 | 114 | ## Recommended sizing 115 | 116 | Recommended sizings are: 117 | 118 | | Role | vCPU | RAM | 119 | | ------ | ---- | --- | 120 | | master | 2 | 2G | 121 | | worker | 2 | 2G | 122 | 123 | **vars/k8s_cluster.yml** 124 | 125 | ```yaml 126 | 127 | # General configuration 128 | 129 | k8s: 130 | cluster_name: k8s-test 131 | cluster_os: Ubuntu 132 | cluster_version: 1.31 133 | container_runtime: crio 134 | master_schedulable: false 135 | 136 | # Nodes configuration 137 | 138 | control_plane: 139 | vcpu: 2 140 | mem: 2 141 | vms: 3 142 | disk: 30 143 | 144 | worker_nodes: 145 | vcpu: 2 146 | mem: 2 147 | vms: 1 148 | disk: 30 149 | 150 | # Network configuration 151 | 152 | network: 153 | network_cidr: 192.168.200.0/24 154 | domain: k8s.test 155 | additional_san: "" 156 | pod_cidr: 10.20.0.0/16 157 | service_cidr: 10.110.0.0/16 158 | cni_plugin: cilium 159 | 160 | rook_ceph: 161 | install_rook: false 162 | volume_size: 50 163 | rook_cluster_size: 1 164 | 165 | # Ingress controller configuration [nginx/haproxy] 166 | 167 | ingress_controller: 168 | install_ingress_controller: true 169 | type: haproxy 170 | node_port: 171 | http: 31080 172 | https: 31443 173 | 174 | # Section for metalLB setup 175 | 176 | metallb: 177 | install_metallb: false 178 | l2: 179 | iprange: 192.168.200.210-192.168.200.250 180 | ``` 181 | 182 | Size for **disk** and **mem** is in GB. 183 | **disk** allows provisioning extra space in the cloud image for pods' ephemeral storage.
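As a sketch of what is typically tuned, the snippet below shows a smaller, hedged variant of the same file for a single control-plane development cluster; all values are illustrative assumptions, and the remaining sections (rook_ceph, ingress_controller, metallb) should be kept from the default file:

```yaml
# Hypothetical development-sized overrides for vars/k8s_cluster.yml (illustrative only).
k8s:
  cluster_name: k8s-dev          # keep names, domains and CIDRs unique per cluster
  cluster_os: CentOS
  cluster_version: 1.34
  container_runtime: containerd
  master_schedulable: true       # allow workloads on the single control-plane node

  control_plane:
    vcpu: 2
    mem: 4
    vms: 1                       # one master and one worker, so no loadbalancer VM is created
    disk: 30

  worker_nodes:
    vcpu: 4
    mem: 8
    vms: 1
    disk: 50

  network:
    network_cidr: 192.168.201.0/24
    domain: k8s-dev.test
    additional_san: ""
    pod_cidr: 10.30.0.0/16
    service_cidr: 10.120.0.0/16
    cni_plugin: calico
```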
184 | 185 | **cluster_version** can be 1.31, 1.32, 1.33 or 1.34, to install the corresponding latest release for that minor version 186 | 187 | VMs are created with these names by default (customizing them is work in progress): 188 | 189 | ```bash 190 | - **cluster_name**-loadbalancer.**domain** 191 | - **cluster_name**-master-N.**domain** 192 | - **cluster_name**-worker-N.**domain** 193 | ``` 194 | 195 | It is possible to choose **CentOS**/**Ubuntu** as the **Kubernetes hosts OS** 196 | 197 | ## Multiple clusters - Thanks to @3rd-st-ninja for the input 198 | 199 | Since the last release, it is possible to provision multiple clusters on the same host. Each cluster is self-consistent and has its own folder under the **clusters** directory of the workspace path (**~/k8s-setup** by default). 200 | 201 | ```bash 202 | clusters 203 | └── k8s-provisioner 204 | ├── admin.kubeconfig 205 | ├── haproxy.cfg 206 | ├── id_rsa 207 | ├── id_rsa.pub 208 | ├── libvirt-resources 209 | │   ├── libvirt-resources.tf 210 | │   └── terraform.tfstate 211 | ├── loadbalancer 212 | │   ├── cloud_init.cfg 213 | │   ├── k8s-loadbalancer.tf 214 | │   └── terraform.tfstate 215 | ├── masters 216 | │   ├── cloud_init.cfg 217 | │   ├── k8s-master.tf 218 | │   └── terraform.tfstate 219 | ├── workers 220 | │   ├── cloud_init.cfg 221 | │   ├── k8s-workers.tf 222 | │   └── terraform.tfstate 223 | └── workers-rook 224 | ├── cloud_init.cfg 225 | └── k8s-workers.tf 226 | ``` 227 | 228 | A dedicated cleanup playbook is generated in the project folder to remove a single cluster without touching the others: 229 | 230 | ```bash 231 | k8s-provisioner-cleanup-playbook.yml 232 | ``` 233 | 234 | A separate inventory is also generated for each cluster: 235 | 236 | ```bash 237 | k8s-provisioner-inventory-k8s 238 | ``` 239 | 240 | In order to keep clusters separated, ensure that you use different **k8s.cluster_name**, **k8s.network.domain** and **k8s.network.network_cidr** values. 241 | 242 | ## Rook 243 | 244 | The **Rook** setup creates a dedicated kind of worker node, attaching an additional volume to the VMs it requires. The size of the Rook cluster can be selected with the **rook_ceph.rook_cluster_size** variable in the settings. 245 | 246 | ## MetalLB 247 | 248 | The basic setup is taken from the upstream documentation. At the moment, the **l2** parameter defines the IP range that can be used as 'external' IPs for accessing the applications (it defaults to a few addresses in the same subnet as the hosts); see the hedged example after the closing note below. 249 | 250 | Suggestions and improvements are highly welcome!
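As a quick, hedged sanity check once **metallb.install_metallb** is enabled: a plain `LoadBalancer` Service like the sketch below should be assigned an `EXTERNAL-IP` from the configured **l2.iprange** (the names and selector are illustrative and assume a matching Deployment exists):

```yaml
# Illustrative Service only - "echo" is a hypothetical app label, not part of this project.
apiVersion: v1
kind: Service
metadata:
  name: echo-lb
  namespace: default
spec:
  type: LoadBalancer
  selector:
    app: echo
  ports:
    - port: 80
      targetPort: 8080
```

Checking it with `kubectl get svc echo-lb` against the generated `admin.kubeconfig` (stored in the cluster folder under the workspace path) should show an address from the default 192.168.200.210-192.168.200.250 range.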
251 | Alex 252 | -------------------------------------------------------------------------------- /10_container_runtimes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install container runtime 3 | hosts: k8s_nodes 4 | become: true 5 | vars_files: 6 | - vars/k8s_cluster.yml 7 | tasks: 8 | - name: Set user home as fact 9 | ansible.builtin.set_fact: 10 | home_dir: "{{ ansible_env.HOME }}" 11 | 12 | - name: Upgrade all packages 13 | ansible.builtin.package: 14 | name: "*" 15 | state: latest # noqa package-latest 16 | 17 | - name: Manage packets when distro is 'Ubuntu' 18 | when: ansible_distribution == 'Ubuntu' 19 | block: 20 | - name: Install pip packages 21 | ansible.builtin.package: 22 | name: 23 | - python3-cryptography 24 | state: present 25 | 26 | - name: Manage packets when distro is 'CentOS' 27 | when: ansible_distribution == 'Fedora' or ansible_distribution == 'CentOS' 28 | block: 29 | - name: Install pip 30 | ansible.builtin.package: 31 | name: 32 | - python3-pip 33 | state: present 34 | 35 | - name: Ensure needed pip packages are present 36 | ansible.builtin.pip: 37 | name: cryptography 38 | executable: "pip3" 39 | 40 | - name: Ensure prerequisites are met. 41 | block: 42 | - name: Add modules to autostart 43 | ansible.builtin.blockinfile: 44 | path: /etc/modules-load.d/k8s.conf 45 | block: | 46 | overlay 47 | br_netfilter 48 | create: true 49 | mode: "0755" 50 | 51 | - name: Enable br_netfilter 52 | community.general.modprobe: 53 | name: "{{ item }}" 54 | state: present 55 | loop: 56 | - br_netfilter 57 | - overlay 58 | 59 | - name: Enable sysctl values 60 | ansible.posix.sysctl: 61 | name: "{{ item.key }}" 62 | value: "{{ item.value }}" 63 | state: present 64 | reload: true 65 | sysctl_set: true 66 | loop: 67 | - key: net.ipv4.ip_forward 68 | value: 1 69 | - key: net.bridge.bridge-nf-call-ip6tables 70 | value: 1 71 | - key: net.bridge.bridge-nf-call-iptables 72 | value: 1 73 | 74 | - name: Fix dead traffic on Systemd 245+ for cilium 75 | when: 76 | - k8s.cluster_os == "Ubuntu" 77 | - k8s.network.cni_plugin == "cilium" 78 | block: 79 | - name: Setup sysctl 80 | ansible.builtin.copy: 81 | dest: /etc/sysctl.d/99-restore-cilium-traffic.conf 82 | content: "net.ipv4.conf.lxc*.rp_filter = 0" 83 | mode: "0755" 84 | 85 | - name: Ensure sysctl is restarted 86 | ansible.builtin.service: 87 | name: systemd-sysctl 88 | state: restarted 89 | 90 | - name: Install cri-o 91 | when: k8s.container_runtime == 'crio' 92 | block: 93 | - name: Ensure required packages for cri-o are installed 94 | ansible.builtin.apt: 95 | name: 96 | - apt-transport-https 97 | - curl 98 | state: present 99 | when: k8s.cluster_os == 'Ubuntu' 100 | 101 | - name: Add crio-repo key 102 | ansible.builtin.apt_key: 103 | url: "{{ crio.ubuntu.crio_key }}" 104 | keyring: "{{ crio.ubuntu.crio_keyring }}" 105 | state: present 106 | when: k8s.cluster_os == 'Ubuntu' 107 | 108 | - name: Ensure the presence of apt-repo for cri-o packages 109 | ansible.builtin.apt_repository: 110 | repo: "{{ crio.ubuntu.crio_repo }}" 111 | filename: "{{ crio.ubuntu.crio_repofile }}" 112 | state: present 113 | when: k8s.cluster_os == 'Ubuntu' 114 | 115 | - name: Temporary fix for memory swap slices on 20.04 116 | ansible.builtin.lineinfile: 117 | path: /etc/default/grub 118 | regexp: "^GRUB_CMDLINE_LINUX" 119 | line: 'GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"' 120 | when: k8s.cluster_os == 'Ubuntu' 121 | 122 | - name: Add cri-o repository 123 | ansible.builtin.yum_repository: 124 | 
name: cri-o 125 | description: cri-o repo 126 | baseurl: "{{ crio.centos.crio_repo }}" 127 | gpgcheck: true 128 | repo_gpgcheck: true 129 | gpgkey: "{{ crio.centos.crio_key }}" 130 | exclude: crio 131 | when: k8s.cluster_os == 'CentOS' 132 | 133 | - name: Ensure cri-o is installed - CentOS 134 | ansible.builtin.yum: 135 | name: cri-o 136 | state: present 137 | when: k8s.cluster_os == 'CentOS' 138 | 139 | - name: Ensure cri-o is installed - Ubuntu 140 | ansible.builtin.apt: 141 | name: 142 | - cri-o 143 | - crun 144 | state: present 145 | when: 146 | - k8s.cluster_os == 'Ubuntu' 147 | 148 | - name: Fire crio-conf template 149 | ansible.builtin.template: 150 | src: templates/crio.conf.j2 151 | dest: /etc/crio/crio.conf 152 | mode: "0755" 153 | 154 | - name: Fire crio-conf template 155 | ansible.builtin.template: 156 | src: templates/crio.conf.crun.j2 157 | dest: /etc/crio/crio.conf.d/01-crio-runc.conf 158 | mode: "0755" 159 | when: 160 | - k8s.cluster_os == 'Ubuntu' 161 | 162 | - name: Remove example CNI configs 163 | ansible.builtin.file: 164 | path: "/etc/cni/net.d/{{ item }}" 165 | state: absent 166 | loop: 167 | - 100-crio-bridge.conf 168 | - 200-loopback.conf 169 | 170 | - name: Force systemd to reread configs 171 | ansible.builtin.systemd: 172 | daemon_reload: true 173 | 174 | - name: Ensure cri-o is enabled and started 175 | ansible.builtin.systemd: 176 | name: crio 177 | state: started 178 | enabled: true 179 | 180 | - name: Ensure containerd is configured and installed on CentOS machine 181 | when: 182 | - k8s.container_runtime == 'containerd' 183 | - k8s.cluster_os == 'CentOS' 184 | block: 185 | - name: Ensure required packages are present 186 | ansible.builtin.yum: 187 | name: 188 | - yum-utils 189 | - device-mapper-persistent-data 190 | - lvm2 191 | state: present 192 | 193 | - name: Add containerd repository 194 | ansible.builtin.get_url: 195 | url: "{{ containerd.centos.containerd_repo }}" 196 | dest: "/etc/yum.repos.d/docker-ce.repo" 197 | mode: "0755" 198 | 199 | - name: Ensure containerd is installed 200 | ansible.builtin.yum: 201 | name: containerd.io 202 | state: present 203 | 204 | - name: Setup containerd on Ubuntu 205 | when: 206 | - k8s.container_runtime == 'containerd' 207 | - k8s.cluster_os == 'Ubuntu' 208 | block: 209 | - name: Add containerd repo key 210 | ansible.builtin.apt_key: 211 | url: "{{ item.key }}" 212 | keyring: "{{ item.keyring }}" 213 | state: present 214 | loop: 215 | - key: "{{ containerd.ubuntu.containerd_repo_key }}" 216 | keyring: "{{ containerd.ubuntu.containerd_repo_key_file }}" 217 | 218 | - name: Ensure the presence of apt-repo for containerd 219 | ansible.builtin.apt_repository: 220 | repo: "{{ item.repo }}" 221 | filename: "{{ item.file }}" 222 | state: present 223 | loop: 224 | - repo: "{{ containerd.ubuntu.containerd_repo }}" 225 | file: /etc/apt/sources.list.d/docker.list 226 | 227 | - name: Ensure containerd is configured and installed on Ubuntu machine 228 | ansible.builtin.apt: 229 | name: containerd.io 230 | state: latest # noqa package-latest 231 | update_cache: true 232 | 233 | - name: Ensure containerd service is configured 234 | when: k8s.container_runtime == 'containerd' 235 | block: 236 | - name: Create /etc/containers 237 | ansible.builtin.file: 238 | state: directory 239 | path: /etc/containerd 240 | mode: "0755" 241 | 242 | - name: Initialize config # noqa no-changed-when 243 | ansible.builtin.shell: containerd config default > /etc/containerd/config.toml 244 | 245 | - name: Configure containerd to work with systemd 246 | 
ansible.builtin.replace: 247 | path: /etc/containerd/config.toml 248 | regexp: "SystemdCgroup = false" 249 | replace: "SystemdCgroup = true" 250 | 251 | - name: Force systemd to reread configs 252 | ansible.builtin.systemd: 253 | daemon_reload: true 254 | 255 | - name: Ensure containerd is enabled and started 256 | ansible.builtin.service: 257 | name: containerd 258 | state: restarted 259 | enabled: true 260 | 261 | - name: Reboot nodes before proceeding 262 | ansible.builtin.reboot: 263 | -------------------------------------------------------------------------------- /04_provisioning_vms.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: This play provisions k8s VMs based on intial config 3 | hosts: vm_host 4 | vars_files: 5 | - vars/k8s_cluster.yml 6 | tasks: 7 | - name: Enumerate Nodes 8 | ansible.builtin.set_fact: 9 | cp_nodes_count: "{{ k8s.control_plane.vms }}" 10 | worker_nodes_count: "{{ k8s.worker_nodes.vms - (rook_ceph.rook_cluster_size if rook_ceph.install_rook else 0) }}" 11 | rook_nodes_count: "{{ rook_ceph.rook_cluster_size if rook_ceph.install_rook else 0 }}" 12 | loadbalancer_count: "{{ 1 if k8s.control_plane.vms > 1 or k8s.worker_nodes.vms > 1 else 0 }}" 13 | 14 | - name: Ensure cluster VMs are in place 15 | community.general.terraform: 16 | complex_vars: true 17 | project_path: "{{ workspace_directory.base_path }}/clusters/{{ k8s.cluster_name | default('k8s-test', true) }}" 18 | force_init: true 19 | variables: 20 | pool_name: "{{ k8s.cluster_name | default('k8s-test', true) }}" 21 | network_name: "{{ k8s.cluster_name | default('k8s-test', true) }}" 22 | network_domain: "{{ k8s.network.domain | default('k8s.test', true) }}" 23 | network_cidr: '{{ k8s.network.network_cidr | default("192.168.200.0/24", true) | split(",") }}' 24 | network_dns_enabled: true 25 | network_dns_local: true 26 | network_dhcp_enabled: true 27 | network_dnsmasq_options: 28 | server: "/{{ k8s.network.domain | default('k8s.test', true) }}/{{ k8s.network.network_cidr | default('192.168.200.0/24', true) | ansible.utils.next_nth_usable(1) }}" # noqa yaml[line-length] 29 | os_firmware: "{{ '/usr/share/OVMF/OVMF_CODE_4M.fd' if ansible_distribution == 'Ubuntu' else '/usr/share/edk2/ovmf/OVMF_CODE.fd' }}" 30 | master_instance_hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-master" 31 | master_instance_domain: "{{ k8s.network.domain | default('k8s.test', true) }}" 32 | master_instance_libvirt_pool: "{{ k8s.cluster_name | default('k8s-test', true) }}" 33 | master_instance_volume_size: "{{ k8s.control_plane.disk }}" 34 | master_instance_cpu: "{{ k8s.control_plane.vcpu }}" 35 | master_instance_memory: "{{ k8s.control_plane.mem }}" 36 | master_instance_count: "{{ k8s.control_plane.vms }}" 37 | master_instance_cloud_image: "/tmp/{{ image_name }}.qcow2" 38 | master_instance_uefi_enabled: "{{ false if k8s.cluster_os == 'CentOS' else true }}" 39 | master_instance_cloud_user: 40 | username: '{{ k8s.vm_user | default("kube", true) }}' 41 | password: '{{ k8s.vm_password | default("kuberocks", true) | password_hash("sha512", rounds=4096) }}' 42 | sshkey: "{{ k8s_key_pub | trim }}" 43 | master_instance_network_interfaces: 44 | - interface_network: "{{ k8s.cluster_name | default('k8s-test', true) }}" 45 | worker_instance_hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-worker" 46 | worker_instance_domain: "{{ k8s.network.domain | default('k8s.test', true) }}" 47 | worker_instance_libvirt_pool: "{{ k8s.cluster_name | default('k8s-test', true) 
}}" 48 | worker_instance_volume_size: "{{ k8s.worker_nodes.disk }}" 49 | worker_instance_cpu: "{{ k8s.worker_nodes.vcpu }}" 50 | worker_instance_memory: "{{ k8s.worker_nodes.mem }}" 51 | worker_instance_count: "{{ (k8s.worker_nodes.vms - rook_ceph.rook_cluster_size) if rook_ceph.install_rook else k8s.worker_nodes.vms }}" 52 | worker_instance_uefi_enabled: "{{ false if k8s.cluster_os == 'CentOS' else true }}" 53 | worker_instance_cloud_image: "/tmp/{{ image_name }}.qcow2" 54 | worker_instance_cloud_user: 55 | username: '{{ k8s.vm_user | default("kube", true) }}' 56 | password: '{{ k8s.vm_password | default("kuberocks", true) | password_hash("sha512", rounds=4096) }}' 57 | sshkey: "{{ k8s_key_pub | trim }}" 58 | worker_instance_network_interfaces: 59 | - interface_network: "{{ k8s.cluster_name | default('k8s-test', true) }}" 60 | worker_rook_enabled: "{{ rook_ceph.install_rook }}" 61 | worker_rook_instance_hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-worker-rook" 62 | worker_rook_instance_domain: "{{ k8s.network.domain | default('k8s.test', true) }}" 63 | worker_rook_instance_libvirt_pool: "{{ k8s.cluster_name | default('k8s-test', true) }}" 64 | worker_rook_instance_volume_size: "{{ k8s.worker_nodes.disk }}" 65 | worker_rook_instance_cpu: "{{ k8s.worker_nodes.vcpu }}" 66 | worker_rook_instance_memory: "{{ k8s.worker_nodes.mem }}" 67 | worker_rook_instance_count: "{{ rook_ceph.rook_cluster_size }}" 68 | worker_rook_instance_uefi_enabled: "{{ false if k8s.cluster_os == 'CentOS' else true }}" 69 | worker_rook_instance_cloud_image: "/tmp/{{ image_name }}.qcow2" 70 | worker_rook_instance_additional_volume_size: "{{ rook_ceph.volume_size }}" 71 | worker_rook_instance_cloud_user: 72 | username: '{{ k8s.vm_user | default("kube", true) }}' 73 | password: '{{ k8s.vm_password | default("kuberocks", true) | password_hash("sha512", rounds=4096) }}' 74 | sshkey: "{{ k8s_key_pub | trim }}" 75 | worker_rook_instance_network_interfaces: 76 | - interface_network: "{{ k8s.cluster_name | default('k8s-test', true) }}" 77 | loadbalancer_enabled: "{{ loadbalancer_count | bool }}" 78 | loadbalancer_instance_hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-loadbalancer" 79 | loadbalancer_instance_domain: "{{ k8s.network.domain | default('k8s.test', true) }}" 80 | loadbalancer_instance_libvirt_pool: "{{ k8s.cluster_name | default('k8s-test', true) }}" 81 | loadbalancer_instance_volume_size: 25 82 | loadbalancer_instance_cpu: 1 83 | loadbalancer_instance_memory: 4 84 | loadbalancer_instance_count: "{{ loadbalancer_count }}" 85 | loadbalancer_instance_uefi_enabled: "{{ false if k8s.cluster_os == 'CentOS' else true }}" 86 | loadbalancer_instance_cloud_image: "/tmp/{{ image_name }}.qcow2" 87 | loadbalancer_instance_cloud_user: 88 | username: '{{ k8s.vm_user | default("kube", true) }}' 89 | password: '{{ k8s.vm_password | default("kuberocks", true) | password_hash("sha512", rounds=4096) }}' 90 | sshkey: "{{ k8s_key_pub | trim }}" 91 | loadbalancer_instance_network_interfaces: 92 | - interface_network: "{{ k8s.cluster_name | default('k8s-test', true) }}" 93 | state: present 94 | become: true 95 | register: output_terraform 96 | 97 | - name: Add masters to given group 98 | ansible.builtin.add_host: 99 | hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-master{{ '-' ~ item if cp_nodes_count | int > 1 else '' }}.{{ k8s.network.domain | default('k8s.test', true) }}" # noqa yaml[line-length] 100 | ansible_ssh_private_key_file: "{{ playbook_dir }}//id_rsa_{{ k8s.cluster_name | 
default('k8s-test', true) }}" 101 | ansible_user: kube 102 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no" 103 | groups: 104 | - "masters" 105 | group_children: "k8s_nodes" 106 | loop: "{{ range(0, k8s.control_plane.vms) | list }}" 107 | delegate_to: localhost 108 | 109 | - name: Add workers to given group 110 | ansible.builtin.add_host: 111 | hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-worker{{ '-' ~ item if worker_nodes_count | int > 1 else '' }}.{{ k8s.network.domain | default('k8s.test', true) }}" # noqa yaml[line-length] 112 | ansible_ssh_private_key_file: "{{ playbook_dir }}//id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}" 113 | ansible_user: kube 114 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no" 115 | groups: 116 | - "workers" 117 | group_children: "k8s_nodes" 118 | loop: "{{ range(0, worker_nodes_count | int) | list }}" 119 | delegate_to: localhost 120 | 121 | - name: Add rook workers to given group 122 | ansible.builtin.add_host: 123 | hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-worker-rook{{ '-' ~ item if rook_nodes_count | int > 1 else '' }}.{{ k8s.network.domain | default('k8s.test', true) }}" # noqa yaml[line-length] 124 | ansible_ssh_private_key_file: "{{ playbook_dir }}//id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}" 125 | ansible_user: kube 126 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no" 127 | groups: 128 | - "workers" 129 | group_children: "k8s_nodes" 130 | loop: "{{ range(0, rook_nodes_count | int) | list }}" 131 | delegate_to: localhost 132 | when: rook_ceph.install_rook 133 | 134 | - name: Add loadbalancer to inventory 135 | when: loadbalancer_count | bool 136 | ansible.builtin.add_host: 137 | hostname: "{{ k8s.cluster_name | default('k8s-test', true) }}-loadbalancer.{{ k8s.network.domain | default('k8s.test', true) }}" 138 | ansible_ssh_private_key_file: "{{ playbook_dir }}//id_rsa_{{ k8s.cluster_name | default('k8s-test', true) }}" 139 | ansible_user: kube 140 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no" 141 | groups: 142 | - "loadbalancer" 143 | delegate_to: localhost 144 | 145 | - name: Ensure to clean known_hosts 146 | ansible.builtin.known_hosts: 147 | host: "{{ item }}" 148 | path: ~/.ssh/known_hosts 149 | state: absent 150 | loop: "{{ query('inventory_hostnames', 'all:!vm_host') }}" 151 | delegate_to: localhost 152 | 153 | - name: Check connection and set facts 154 | hosts: masters,workers,loadbalancer 155 | gather_facts: false 156 | tasks: 157 | - name: Wait 600 seconds for target connection to become reachable/usable 158 | ansible.builtin.wait_for_connection: 159 | timeout: 600 160 | delay: 0 161 | 162 | - name: Wait for cloud init to finish 163 | community.general.cloud_init_data_facts: 164 | filter: status 165 | register: cloud_init_state 166 | until: "cloud_init_state.cloud_init_data_facts.status.v1.stage is defined and not cloud_init_state.cloud_init_data_facts.status.v1.stage" 167 | retries: 50 168 | delay: 5 169 | 170 | - name: Ping host to check status 171 | ansible.builtin.ping: 172 | 173 | - name: Add hosts to correct groups 174 | ansible.builtin.group_by: 175 | key: "{{ group_children }}" 176 | when: group_children is defined 177 | 178 | - name: Extract facts from setup 179 | ansible.builtin.setup: 180 | register: machine_facts 181 | 182 | - name: Set relevant facts for the host 183 | ansible.builtin.set_fact: 184 | host_ip: "{{ machine_facts.ansible_facts.ansible_default_ipv4.address }}" 185 | host_interface: "{{ 
machine_facts.ansible_facts.ansible_default_ipv4.interface }}" 186 | host_mac: "{{ machine_facts.ansible_facts.ansible_default_ipv4.macaddress }}" 187 | host_fqdn: "{{ machine_facts.ansible_facts.ansible_fqdn }}" 188 | -------------------------------------------------------------------------------- /files/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "network_autostart" { 2 | description = "Whether to autostart the libvirt network" 3 | type = bool 4 | default = true 5 | } 6 | 7 | variable "network_name" { 8 | description = "Name of the libvirt network" 9 | type = string 10 | } 11 | 12 | variable "network_mode" { 13 | description = "Mode of the libvirt network" 14 | type = string 15 | default = "nat" 16 | } 17 | 18 | variable "network_domain" { 19 | description = "Domain of the libvirt network" 20 | type = string 21 | default = null 22 | } 23 | 24 | variable "network_cidr" { 25 | description = "CIDR for the libvirt network" 26 | type = list(string) 27 | default = [ "192.168.122.0/24" ] 28 | } 29 | 30 | variable "network_bridge" { 31 | description = "Bridge for the libvirt network" 32 | type = string 33 | default = null 34 | } 35 | 36 | variable "network_mtu" { 37 | description = "MTU for the libvirt network" 38 | type = number 39 | default = null 40 | } 41 | 42 | variable "network_dhcp_enabled" { 43 | description = "Whether DHCP is enabled for the libvirt network" 44 | type = bool 45 | default = false 46 | } 47 | 48 | variable "network_dns_local" { 49 | description = "Whether DNS is local-only for the libvirt network" 50 | type = bool 51 | default = false 52 | } 53 | 54 | variable "network_dns_enabled" { 55 | description = "Whether DNS is enabled for the libvirt network" 56 | type = bool 57 | default = false 58 | } 59 | 60 | variable "network_dnsmasq_options" { 61 | description = "Map of dnsmasq options for the libvirt network" 62 | type = map(string) 63 | default = {} 64 | } 65 | 66 | variable "network_dns_entries" { 67 | description = "Map of DNS entries for the libvirt network" 68 | type = map(string) 69 | default = {} 70 | } 71 | 72 | variable "network_routes" { 73 | description = "Map of routes for the libvirt network (format CIDR = gateway --> '10.0.0.1/24' = '10.0.0.1' )" 74 | type = map(string) 75 | default = {} 76 | } 77 | 78 | variable "pool_name" { 79 | description = "Name of the libvirt pool" 80 | type = string 81 | } 82 | 83 | variable "pool_path" { 84 | description = "Path for the libvirt pool" 85 | type = string 86 | default = "/var/lib/libvirt/images" 87 | } 88 | 89 | variable "os_firmware" { 90 | type = string 91 | default = "/usr/share/edk2/ovmf/OVMF_CODE.fd" 92 | description = "Path to the ovmf firmware on the host machine. 
Ubuntu=/usr/share/OVMF/OVMF_CODE.fd" 93 | } 94 | 95 | 96 | variable "loadbalancer_instance_count" { 97 | type = number 98 | default = 1 99 | description = "Number of instances to create" 100 | } 101 | 102 | variable "loadbalancer_instance_cloud_image" { 103 | type = string 104 | description = "Cloud image to use for instance provisioning" 105 | default = "" 106 | } 107 | 108 | variable "loadbalancer_instance_additional_volume_size" { 109 | type = number 110 | description = "Additional block device size" 111 | default = 0 112 | } 113 | 114 | variable "loadbalancer_instance_hostname" { 115 | type = string 116 | default = "service-vm" 117 | description = "Hostname to assign the istance via cloud-init" 118 | } 119 | 120 | variable "loadbalancer_instance_domain" { 121 | type = string 122 | default = "example.com" 123 | description = "Hostname to assign the istance via cloud-init" 124 | } 125 | 126 | variable "loadbalancer_instance_cpu" { 127 | type = number 128 | default = 2 129 | description = "Number of CPUs to configure for the instance" 130 | } 131 | 132 | variable "loadbalancer_instance_memory" { 133 | type = number 134 | default = 4 135 | description = "Instance memory size, in GB" 136 | } 137 | 138 | variable "loadbalancer_instance_volume_size" { 139 | type = number 140 | default = 20 141 | description = "Instance memory size, in GB" 142 | } 143 | 144 | variable "loadbalancer_instance_cloud_user" { 145 | type = object({ 146 | username = string 147 | password = string 148 | sshkey = optional(string) 149 | }) 150 | 151 | default = { 152 | username = "sysadmin" 153 | password = "redhat" 154 | sshkey = "" 155 | } 156 | } 157 | 158 | 159 | variable "loadbalancer_instance_network_interfaces" { 160 | type = list(object({ 161 | interface_network = string 162 | interface_mac_address = optional(string) 163 | interface_addresses = optional(list(string), []) 164 | interface_hostname = optional(string) 165 | interface_wait_for_lease = optional(bool, true) 166 | }) 167 | ) 168 | default = [{ 169 | interface_network = "default" 170 | } 171 | ] 172 | description = "A list of network interfaces to add to the instance" 173 | } 174 | 175 | variable "loadbalancer_instance_libvirt_pool" { 176 | type = string 177 | description = "The libvirt pool to attach the instance to" 178 | default = "default" 179 | } 180 | 181 | variable "loadbalancer_instance_uefi_enabled" { 182 | type = bool 183 | default = true 184 | description = "Set this to true if OS should be installed via ISO" 185 | } 186 | 187 | variable "loadbalancer_enabled" { 188 | type = bool 189 | default = false 190 | description = "Set this to true if OS should be installed via ISO" 191 | } 192 | 193 | 194 | variable "master_instance_count" { 195 | type = number 196 | default = 1 197 | description = "Number of instances to create" 198 | } 199 | 200 | variable "master_instance_cloud_image" { 201 | type = string 202 | description = "Cloud image to use for instance provisioning" 203 | default = "" 204 | } 205 | 206 | variable "master_instance_hostname" { 207 | type = string 208 | default = "service-vm" 209 | description = "Hostname to assign the istance via cloud-init" 210 | } 211 | 212 | variable "master_instance_domain" { 213 | type = string 214 | default = "example.com" 215 | description = "Hostname to assign the istance via cloud-init" 216 | } 217 | 218 | variable "master_instance_cpu" { 219 | type = number 220 | default = 2 221 | description = "Number of CPUs to configure for the instance" 222 | } 223 | 224 | variable "master_instance_memory" { 225 | 
type = number 226 | default = 4 227 | description = "Instance memory size, in GB" 228 | } 229 | 230 | variable "master_instance_volume_size" { 231 | type = number 232 | default = 20 233 | description = "Instance memory size, in GB" 234 | } 235 | 236 | variable "master_instance_cloud_user" { 237 | type = object({ 238 | username = string 239 | password = string 240 | sshkey = optional(string) 241 | }) 242 | 243 | default = { 244 | username = "sysadmin" 245 | password = "redhat" 246 | sshkey = "" 247 | } 248 | } 249 | 250 | variable "master_instance_network_interfaces" { 251 | type = list(object({ 252 | interface_network = string 253 | interface_mac_address = optional(string) 254 | interface_addresses = optional(list(string), []) 255 | interface_hostname = optional(string) 256 | interface_wait_for_lease = optional(bool, true) 257 | }) 258 | ) 259 | default = [{ 260 | interface_network = "default" 261 | } 262 | ] 263 | description = "A list of network interfaces to add to the instance" 264 | } 265 | 266 | variable "master_instance_libvirt_pool" { 267 | type = string 268 | description = "The libvirt pool to attach the instance to" 269 | default = "default" 270 | } 271 | 272 | variable "master_instance_uefi_enabled" { 273 | type = bool 274 | default = true 275 | description = "Set this to true if OS should be installed via ISO" 276 | } 277 | 278 | 279 | variable "worker_instance_count" { 280 | type = number 281 | default = 1 282 | description = "Number of instances to create" 283 | } 284 | 285 | variable "worker_instance_cloud_image" { 286 | type = string 287 | description = "Cloud image to use for instance provisioning" 288 | default = "" 289 | } 290 | 291 | variable "worker_instance_hostname" { 292 | type = string 293 | default = "service-vm" 294 | description = "Hostname to assign the istance via cloud-init" 295 | } 296 | 297 | variable "worker_instance_domain" { 298 | type = string 299 | default = "example.com" 300 | description = "Hostname to assign the istance via cloud-init" 301 | } 302 | 303 | variable "worker_instance_cpu" { 304 | type = number 305 | default = 2 306 | description = "Number of CPUs to configure for the instance" 307 | } 308 | 309 | variable "worker_instance_memory" { 310 | type = number 311 | default = 4 312 | description = "Instance memory size, in GB" 313 | } 314 | 315 | variable "worker_instance_volume_size" { 316 | type = number 317 | default = 20 318 | description = "Instance memory size, in GB" 319 | } 320 | 321 | variable "worker_instance_cloud_user" { 322 | type = object({ 323 | username = string 324 | password = string 325 | sshkey = optional(string) 326 | }) 327 | 328 | default = { 329 | username = "sysadmin" 330 | password = "redhat" 331 | sshkey = "" 332 | } 333 | } 334 | 335 | variable "worker_instance_network_interfaces" { 336 | type = list(object({ 337 | interface_network = string 338 | interface_mac_address = optional(string) 339 | interface_addresses = optional(list(string), []) 340 | interface_hostname = optional(string) 341 | interface_wait_for_lease = optional(bool, true) 342 | }) 343 | ) 344 | default = [{ 345 | interface_network = "default" 346 | } 347 | ] 348 | description = "A list of network interfaces to add to the instance" 349 | } 350 | 351 | variable "worker_instance_libvirt_pool" { 352 | type = string 353 | description = "The libvirt pool to attach the instance to" 354 | default = "default" 355 | } 356 | 357 | variable "worker_instance_uefi_enabled" { 358 | type = bool 359 | default = true 360 | description = "Set this to true if OS should 
be installed via ISO" 361 | } 362 | 363 | variable "worker_rook_instance_count" { 364 | type = number 365 | default = 1 366 | description = "Number of instances to create" 367 | } 368 | 369 | variable "worker_rook_instance_cloud_image" { 370 | type = string 371 | description = "Cloud image to use for instance provisioning" 372 | default = "" 373 | } 374 | 375 | variable "worker_rook_instance_additional_volume_size" { 376 | type = number 377 | description = "Additional block device size" 378 | default = 0 379 | } 380 | 381 | variable "worker_rook_instance_hostname" { 382 | type = string 383 | default = "service-vm" 384 | description = "Hostname to assign the istance via cloud-init" 385 | } 386 | 387 | variable "worker_rook_instance_domain" { 388 | type = string 389 | default = "example.com" 390 | description = "Hostname to assign the istance via cloud-init" 391 | } 392 | 393 | variable "worker_rook_instance_cpu" { 394 | type = number 395 | default = 2 396 | description = "Number of CPUs to configure for the instance" 397 | } 398 | 399 | variable "worker_rook_instance_memory" { 400 | type = number 401 | default = 4 402 | description = "Instance memory size, in GB" 403 | } 404 | 405 | variable "worker_rook_instance_volume_size" { 406 | type = number 407 | default = 20 408 | description = "Instance memory size, in GB" 409 | } 410 | 411 | variable "worker_rook_instance_cloud_user" { 412 | type = object({ 413 | username = string 414 | password = string 415 | sshkey = optional(string) 416 | }) 417 | 418 | default = { 419 | username = "sysadmin" 420 | password = "redhat" 421 | sshkey = "" 422 | } 423 | } 424 | 425 | variable "worker_rook_instance_network_interfaces" { 426 | type = list(object({ 427 | interface_network = string 428 | interface_mac_address = optional(string) 429 | interface_addresses = optional(list(string), []) 430 | interface_hostname = optional(string) 431 | interface_wait_for_lease = optional(bool, true) 432 | }) 433 | ) 434 | default = [{ 435 | interface_network = "default" 436 | } 437 | ] 438 | description = "A list of network interfaces to add to the instance" 439 | } 440 | 441 | 442 | variable "worker_rook_instance_libvirt_pool" { 443 | type = string 444 | description = "The libvirt pool to attach the instance to" 445 | default = "default" 446 | } 447 | 448 | variable "worker_rook_instance_uefi_enabled" { 449 | type = bool 450 | default = true 451 | description = "Set this to true if OS should be installed via ISO" 452 | } 453 | 454 | variable "worker_rook_enabled" { 455 | type = bool 456 | default = false 457 | description = "Set this to true if OS should be installed via ISO" 458 | } 459 | --------------------------------------------------------------------------------