├── ansible.cfg ├── main.yml ├── .gitignore ├── kubernetes-setup ├── vars.yml ├── k8s_worker_node.yml ├── k8s_setup.yml ├── haproxy.yml ├── k8s_control_plane_join.yml ├── k8s_control_plane.yml └── k8s_common.yml ├── hosts.ini ├── Vagrantfile └── README.md /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = hosts.ini 3 | host_key_checking = False 4 | -------------------------------------------------------------------------------- /main.yml: -------------------------------------------------------------------------------- 1 | - name: Include playbook Kubernetes 2 | ansible.builtin.import_playbook: kubernetes-setup/k8s_setup.yml -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vagrant/ 3 | kubernetes-setup/join-command 4 | kubernetes-setup/admin.conf 5 | kubernetes-setup/node-playbook.retry 6 | kubernetes-setup/certificate-key 7 | -------------------------------------------------------------------------------- /kubernetes-setup/vars.yml: -------------------------------------------------------------------------------- 1 | # k8s_version: "1.30" # You can use this way to install the latest minor version 2 | k8s_version: "1.30.1-1.1" # You can use this way to install exacly version 3 | k8s_major_version: "1.30" 4 | calico_version: "3.28.0" 5 | -------------------------------------------------------------------------------- /kubernetes-setup/k8s_worker_node.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Worker Node 3 | # 4 | - hosts: worker 5 | become: yes 6 | vars_files: 7 | - vars.yml 8 | vars: 9 | saved_join_command: "{{ lookup('file', './join-command') }}" 10 | tasks: 11 | 12 | # Join server on k8s Cluster as Worker Node 13 | - name: Run the saved join command on another server 14 | command: "{{ 
saved_join_command }}" 15 | 16 | 17 | -------------------------------------------------------------------------------- /kubernetes-setup/k8s_setup.yml: -------------------------------------------------------------------------------- 1 | - name: Include playbook HAproxy loadbalancer to Kubernetes Control Plane HA 2 | ansible.builtin.import_playbook: haproxy.yml 3 | 4 | - name: Include playbook kubernetes Common 5 | ansible.builtin.import_playbook: k8s_common.yml 6 | 7 | - name: Include playbook kubernetes Control Plane 8 | ansible.builtin.import_playbook: k8s_control_plane.yml 9 | 10 | - name: Include playbook kubernetes Control Plane Join 11 | ansible.builtin.import_playbook: k8s_control_plane_join.yml 12 | 13 | - name: Include playbook kubernetes Worker Node 14 | ansible.builtin.import_playbook: k8s_worker_node.yml 15 | -------------------------------------------------------------------------------- /hosts.ini: -------------------------------------------------------------------------------- 1 | haproxy ansible_host=192.168.56.9 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/haproxy/virtualbox/private_key 2 | 3 | [controlplanefirst] 4 | controlplane-1 ansible_host=192.168.56.11 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/controlplane-1/virtualbox/private_key 5 | 6 | [controlplaneha] 7 | controlplane-2 ansible_host=192.168.56.12 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/controlplane-2/virtualbox/private_key 8 | ;controlplane3 ansible_host=192.168.56.13 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/controlplane-3/virtualbox/private_key 9 | 10 | [worker] 11 | node-1 ansible_host=192.168.56.21 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/node-1/virtualbox/private_key 12 | node-2 ansible_host=192.168.56.22 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/node-2/virtualbox/private_key 13 | ;node-3 
ansible_host=192.168.56.23 ansible_ssh_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/node-3/virtualbox/private_key
--------------------------------------------------------------------------------
/kubernetes-setup/haproxy.yml:
--------------------------------------------------------------------------------
---
# Installs and configures HAProxy as the TCP load balancer in front of the
# Kubernetes API servers (kubeadm init uses 192.168.56.9:6443 as the
# --control-plane-endpoint, see k8s_control_plane.yml).
- hosts: haproxy
  become: yes
  tasks:
    - name: Update apt cache
      apt:
        update_cache: yes

    - name: Install HAProxy
      apt:
        name: haproxy
        state: present

    - name: Configure HAProxy
      copy:
        dest: /etc/haproxy/haproxy.cfg
        content: |
          frontend k8s-api
              bind *:6443
              mode tcp
              default_backend k8s-api
              timeout client 30s

          backend k8s-api
              mode tcp
              option tcp-check
              balance roundrobin
              default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
              # Backends must use the host-only addresses declared in hosts.ini.
              # 10.0.2.15 is the VirtualBox NAT address (identical on every VM)
              # and cannot be used to reach a specific control plane.
              server controlplane-1 192.168.56.11:6443 check
              server controlplane-2 192.168.56.12:6443 check
              # Uncomment together with controlplane-3 in hosts.ini / Vagrantfile:
              # server controlplane-3 192.168.56.13:6443 check
              timeout connect 5s
              timeout server 30s

    - name: Restart HAProxy
      systemd:
        name: haproxy
        state: restarted
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
IMAGE_NAME = "ubuntu/jammy64" # ubuntu 22.04
QTD_CONTROL_PLANES = 2
QTD_WORKER_NODES = 2

Vagrant.configure("2") do |config|

  # Dedicated load balancer VM for the HA control plane endpoint.
  config.vm.define "haproxy" do |haproxy|
    haproxy.vm.box = IMAGE_NAME
    haproxy.vm.network "private_network", ip: "192.168.56.9"
    haproxy.vm.hostname = "haproxy"
    haproxy.vm.provider "virtualbox" do |v|
      v.memory = 512
      v.cpus = 1
    end
  end

  # Default provider sizing for all remaining machines.
  config.vm.provider "virtualbox" do |v|
    v.memory = 2048
    v.cpus = 2
  end

  # Control planes get 192.168.56.11, .12, ...
  (1..QTD_CONTROL_PLANES).each do |i|
    config.vm.define "controlplane-#{i}" do |master|
      master.vm.box = IMAGE_NAME
      master.vm.network "private_network", ip: "192.168.56.#{i + 10}"
      master.vm.hostname = "controlplane-#{i}"
      # NOTE(review): a bare `privileged = true` statement was removed here;
      # it only bound an unused Ruby local and had no effect on the VM.
    end
  end

  # Worker nodes get 192.168.56.21, .22, ...
  (1..QTD_WORKER_NODES).each do |i|
    config.vm.define "node-#{i}" do |worker|
      worker.vm.box = IMAGE_NAME
      worker.vm.network "private_network", ip: "192.168.56.#{i + 20}"
      worker.vm.hostname = "node-#{i}"
      # NOTE(review): same dead `privileged = true` statement removed here.
    end
  end

end
--------------------------------------------------------------------------------
/kubernetes-setup/k8s_control_plane_join.yml:
--------------------------------------------------------------------------------
#
# Control Plane Join (additional control planes for HA)
#
- hosts: controlplaneha
  become: yes
  vars_files:
    - vars.yml
  vars:
    # Both local files are produced by k8s_control_plane.yml on the first control plane run.
    saved_join_command: "{{ lookup('file', './join-command') }}"
    saved_certificate_key: "{{ lookup('file', './certificate-key') }}"
  tasks:

    # /join-cmd is written after a successful join and acts as the idempotency marker.
    - name: Check if Kubernetes is already initialized
      become: yes
      stat:
        path: /join-cmd
      register: kubeadm_join_check

    - name: Print Control Plane Join command
      debug:
        msg: "{{ saved_join_command }} --control-plane --certificate-key {{ saved_certificate_key }} --apiserver-advertise-address={{ ansible_host }}"
      when: not kubeadm_join_check.stat.exists

    # Join server on k8s Cluster as New Control Plane
    - name: Run the saved join command with certificate-key to join as control-plane role
      command: "{{ saved_join_command }} --control-plane --certificate-key {{ saved_certificate_key }} --apiserver-advertise-address={{ ansible_host }}"
      when: not kubeadm_join_check.stat.exists

    - name: Save executed join command to /join-cmd marker file on server
      copy:
        content: "{{ saved_join_command }} --control-plane --certificate-key {{ saved_certificate_key }} --apiserver-advertise-address={{ ansible_host }}"
        dest: /join-cmd
      when: not 
kubeadm_join_check.stat.exists 35 | 36 | -------------------------------------------------------------------------------- /kubernetes-setup/k8s_control_plane.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Control Plane / Master Node 3 | # 4 | - hosts: controlplanefirst 5 | become: yes 6 | vars_files: 7 | - vars.yml 8 | tasks: 9 | 10 | ######### Init K8s Cluster ############### 11 | - name: Check if Kubernetes is already initialized 12 | become: yes 13 | stat: 14 | path: /etc/kubernetes/admin.conf 15 | register: kubeadm_init_check 16 | 17 | # - name: Print Join command 18 | # debug: 19 | # msg: "kubeadm init --node-name k8s-master --pod-network-cidr=192.168.56.0/21 --apiserver-advertise-address={{ ansible_default_ipv4.address }}" 20 | 21 | - name: Initialize the Kubernetes cluster using kubeadm 22 | become: yes 23 | # one master only 24 | # command: kubeadm init --node-name k8s-master --pod-network-cidr=192.168.56.0/21 --apiserver-advertise-address=192.168.56.10 #--apiserver-cert-extra-sans="192.168.56.10" 25 | # HA 26 | command: kubeadm init --control-plane-endpoint "192.168.56.9:6443" --upload-certs --pod-network-cidr=192.168.56.0/21 --apiserver-advertise-address={{ ansible_host }} 27 | when: not kubeadm_init_check.stat.exists 28 | 29 | 30 | ######### kube config ############### 31 | - name: Check if kube config file is already created 32 | become: yes 33 | stat: 34 | path: /home/vagrant/.kube/config 35 | register: kube_config_check 36 | 37 | - name: Create kube directory 38 | file: 39 | path: /home/vagrant/.kube 40 | state: directory 41 | when: not kube_config_check.stat.exists 42 | 43 | - name: Setup kubeconfig for vagrant user 44 | copy: 45 | src: /etc/kubernetes/admin.conf 46 | dest: /home/vagrant/.kube/config 47 | remote_src: yes 48 | owner: vagrant 49 | group: vagrant 50 | mode: '0644' 51 | when: not kube_config_check.stat.exists 52 | 53 | - name: Fetch the admin.conf file to local machine 54 | fetch: 55 | 
src: /etc/kubernetes/admin.conf 56 | dest: ./admin.conf 57 | flat: yes 58 | 59 | ######### Generate K8s Cluster certificate-key for control plane join command ############### 60 | - name: Check if certificate-key already exists 61 | stat: 62 | path: "/certificate-key" 63 | register: certificate_key_plane_check 64 | 65 | - name: Generate certificate-key 66 | shell: kubeadm init phase upload-certs --upload-certs | tail -n1 67 | register: certificate_key_plane 68 | when: not certificate_key_plane_check.stat.exists 69 | 70 | - name: Print Control Plane Join command 71 | debug: 72 | msg: "{{ certificate_key_plane.stdout_lines[0] }}" 73 | when: not certificate_key_plane_check.stat.exists 74 | 75 | - name: Save certificate-key to file on server 76 | copy: 77 | content: "{{ certificate_key_plane.stdout }}" 78 | dest: /certificate-key 79 | when: not certificate_key_plane_check.stat.exists 80 | 81 | - name: Copy certificate-key to local file 82 | local_action: copy content="{{ certificate_key_plane.stdout_lines[0] }}" dest="./certificate-key" 83 | become: false 84 | when: not certificate_key_plane_check.stat.exists 85 | 86 | ######### Generate K8s Cluster Join Command ############### 87 | - name: Check if join command already exists 88 | become: no 89 | stat: 90 | path: "/join-command-worker-node" 91 | register: join_command_check 92 | 93 | - name: Generate join command 94 | command: kubeadm token create --print-join-command 95 | register: join_command 96 | when: not join_command_check.stat.exists 97 | 98 | - name: Print Join command 99 | debug: 100 | msg: "{{ join_command.stdout_lines[0] }}" 101 | when: not join_command_check.stat.exists 102 | 103 | - name: Save join command to file on server 104 | copy: 105 | content: "{{ join_command.stdout }}" 106 | dest: /join-command-worker-node 107 | when: not join_command_check.stat.exists 108 | 109 | - name: Copy join command to local file 110 | local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command" 
111 | become: false 112 | when: not join_command_check.stat.exists 113 | 114 | 115 | ######### Install Network plugin CNI - Calico ############### 116 | - name: Check if calico resources already exist 117 | shell: kubectl get namespaces tigera-operator 118 | become: false 119 | ignore_errors: true 120 | register: calico_namespace_check 121 | 122 | - name: Download calico.conf 123 | get_url: 124 | url: https://raw.githubusercontent.com/projectcalico/calico/v{{ calico_version }}/manifests/tigera-operator.yaml 125 | dest: /home/vagrant/calico.yaml 126 | become: false 127 | when: calico_namespace_check.rc != 0 128 | 129 | - name: Install calico pod network 130 | become: false 131 | command: kubectl create -f /home/vagrant/calico.yaml 132 | when: calico_namespace_check.rc != 0 133 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # About 2 | 3 | Project to create a local environment to run kubernetes server. 4 | 5 | The vagrant creates a Kubernetes Cluster with N control planes and N workers. 6 | 7 | To Control Plane works in HA, we need a loadbalancing to them. The haproxy will be this loadbalancig with a new instance for him. 8 | 9 | All server configuration is made by Ansible. 10 | 11 | # Prerequisites 12 | 13 | - Vagrant 14 | - Ansible 15 | 16 | # Version 17 | 18 | 19 | ## Vagrantfile 20 | 21 | Verify the Vagrantfile to get the version of all used components. 22 | 23 | On Vagrantfile you can change the variables as needed. 

```ssh
IMAGE_NAME = "ubuntu/jammy64" # ubuntu 22.04
QTD_CONTROL_PLANES = 2
QTD_WORKER_NODES = 2
```

IMAGE_NAME: Base box image used by the Vagrantfile

QTD_CONTROL_PLANES: Quantity of control plane nodes

QTD_WORKER_NODES: Quantity of worker nodes

## Ansible / Kubernetes Configuration

Edit the kubernetes-setup/vars.yml as needed


```yaml
# k8s_version: "1.30" # You can use this way to install the latest minor version
k8s_version: "1.30.1-1.1" # You can use this way to install an exact version
k8s_major_version: "1.30"
calico_version: "3.28.0"
```

K8S_VERSION: Version of kubelet/kubeadm/kubectl to install. You can pin an exact version, or give only the major.minor version and the script will select the latest matching package from the OS repository.

K8S_MAJOR_VERSION: Major Kubernetes version (must match the major.minor part of K8S_VERSION)

CALICO_VERSION: Version of Calico (Kubernetes network add-on)

# How to manage the environment

When vagrant runs, it will create the servers and provision the configuration.

You only need to run:

## Starting environment

```ssh
vagrant up
```

or, to be quicker, you can create the instances simultaneously:

```ssh
vagrant up haproxy &
vagrant up controlplane-1 &
vagrant up controlplane-2 &
vagrant up node-1 &
vagrant up node-2
```


## Stop

```ssh
vagrant halt
```

## Suspend

```ssh
vagrant suspend
```

## Resume

```ssh
vagrant resume
```

## Clear / Destroy

```ssh
vagrant destroy
```

# Ansible

To configure the VMs created by vagrant, run ansible with the command:

```
ansible-playbook main.yml
```

## About Ansible

The ansible configuration is in ansible.cfg

You can check the inventory in the hosts.ini file. 

When the ansible runs, it will execute main.yml, which will call kubernetes-setup/k8s_setup.yml.

The playbooks called by kubernetes-setup/k8s_setup.yml load the vars.yml.

haproxy.yml: Configure the Control Plane load balancer with HAProxy

k8s_common.yml: Configure Kubernetes packages and CRI

k8s_control_plane.yml: Configure the First Control Plane

k8s_control_plane_join.yml: Configure the other Control Planes via the kubeadm join command

k8s_worker_node.yml: Configure worker nodes via the kubeadm join command



# Kubectl

The ansible script configures the vagrant user inside the controlplane-1 VM (first Control Plane) and copies the configuration to the admin.conf file.

## Test inside Control plane

Access the controlplane-1 VM

```ssh
vagrant ssh controlplane-1
```

Execute the kubectl command

```ssh
vagrant@controlplane-1:~$ kubectl get nodes
NAME             STATUS   ROLES           AGE     VERSION
controlplane-1   Ready    control-plane   26m     v1.30.1
controlplane-2   Ready    control-plane   25s     v1.30.1
node-1           Ready                    9m17s   v1.30.1
node-2           Ready                    6m5s    v1.30.1
```

## Test directly from your PC

Copy the admin.conf file to the .kube folder to configure

```ssh
cp kubernetes-setup/admin.conf ~/.kube/config
```

Execute the kubectl command

```ssh
$ kubectl get pods -A
NAMESPACE         NAME                                 READY   STATUS    RESTARTS   AGE
kube-system       coredns-7db6d8ff4d-n55jj             1/1     Running   0          31m
kube-system       coredns-7db6d8ff4d-ssxhp             1/1     Running   0          31m
kube-system       etcd-k8s-master                      1/1     Running   0          31m
kube-system       kube-apiserver-k8s-master            1/1     Running   0          31m
kube-system       kube-controller-manager-k8s-master   1/1     Running   0          31m
kube-system       kube-proxy-48jdc                     1/1     Running   0          31m
kube-system       kube-proxy-5pr48                     1/1     Running   0          25m
kube-system       kube-proxy-vlgdx                     1/1     Running   0          28m
174 | kube-system kube-scheduler-k8s-master 1/1 Running 0 31m 175 | tigera-operator tigera-operator-76ff79f7fd-lshkn 1/1 Running 0 31 176 | ``` 177 | -------------------------------------------------------------------------------- /kubernetes-setup/k8s_common.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: controlplanefirst,controlplaneha,worker 3 | become: true 4 | vars_files: 5 | - vars.yml 6 | tasks: 7 | - name: fail if not tested ubuntu version 8 | fail: 9 | msg: "OS should be Ubuntu 22.04, not {{ ansible_distribution }} {{ ansible_distribution_version }}" 10 | when: ansible_distribution != 'Ubuntu' or ansible_distribution_version != '22.04' 11 | 12 | - name: Install necessary packages 13 | apt: 14 | name: "{{ packages }}" 15 | state: present 16 | update_cache: yes 17 | vars: 18 | packages: 19 | - apt-transport-https 20 | - ca-certificates 21 | - curl 22 | - software-properties-common 23 | - gnupg2 24 | - net-tools 25 | 26 | 27 | ######### Prepare for CRI ############### 28 | - name: Get IP eth1 addr 29 | shell: ifconfig eth1 | grep 'inet' | cut -d{{':'}} -f2 | awk '{ print $2 }' 30 | register: node_ip 31 | 32 | - name: Configure CRI 33 | blockinfile: 34 | create: true 35 | path: /etc/modules-load.d/CRI.conf 36 | block: | 37 | overlay 38 | br_netfilter 39 | 40 | - name: Enable kernel modules 41 | become: yes 42 | shell: | 43 | modprobe overlay 44 | modprobe br_netfilter 45 | 46 | - name: Configure IP forwarding and iptables 47 | blockinfile: 48 | create: true 49 | path: /etc/sysctl.d/CRI.conf 50 | block: | 51 | net.bridge.bridge-nf-call-iptables = 1 52 | net.bridge.bridge-nf-call-ip6tables = 1 53 | net.ipv4.ip_forward = 1 54 | 55 | - name: Persist changes 56 | command: sysctl -p 57 | 58 | - name: Apply sysctl settings 59 | command: sysctl --system 60 | 61 | 62 | # ######### Install docker rep for container.d ############### 63 | # - name: Add an apt signing key for Docker 64 | # apt_key: 65 | # url: 
https://download.docker.com/linux/ubuntu/gpg 66 | # state: present 67 | 68 | # - name: Add apt repository for stable version 69 | # apt_repository: 70 | # repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable 71 | # state: present 72 | # filename: docker.list 73 | 74 | 75 | ######### Install k8s rep ############### 76 | - name: Add an apt signing key for Kubernetes 77 | apt_key: 78 | url: https://pkgs.k8s.io/core:/stable:/v{{ k8s_major_version }}/deb/Release.key 79 | state: present 80 | 81 | - name: Adding apt repository for Kubernetes 82 | apt_repository: 83 | repo: deb https://pkgs.k8s.io/core:/stable:/v{{ k8s_major_version }}/deb/ / 84 | state: present 85 | filename: kubernetes.list 86 | 87 | ######### Install CRI-O rep ############### 88 | - name: Add an apt signing key for CRI-O 89 | apt_key: 90 | url: https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/Release.key 91 | state: present 92 | 93 | - name: Adding apt repository for CRI-O 94 | apt_repository: 95 | repo: deb https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/ / 96 | state: present 97 | filename: cri-o.list 98 | 99 | 100 | ######### Update apt package index ############### 101 | - name: Update apt package index again 102 | apt: 103 | update_cache: yes 104 | 105 | 106 | ######### Install CRI ############### 107 | - name: Install CRI 108 | apt: 109 | name: "{{ packages }}" 110 | state: present 111 | update_cache: yes 112 | vars: 113 | packages: 114 | # - containerd.io 115 | # - docker-ce-cli 116 | - cri-o 117 | # notify: 118 | # - docker status 119 | # notify: crio service 120 | 121 | - name: Restart crio 122 | ansible.builtin.systemd_service: 123 | name: crio 124 | state: restarted 125 | 126 | ######### Remote swap to kubelet ############### 127 | - name: Remove swapfile from /etc/fstab 128 | mount: 129 | name: "{{ item }}" 130 | fstype: swap 131 | state: absent 132 | with_items: 133 | - swap 134 | - none 135 | 136 | - name: Disable swap 137 
| command: swapoff -a 138 | when: ansible_swaptotal_mb > 0 139 | 140 | 141 | ######### Install kube commands ############### 142 | 143 | # search the latest to complete the version choosed by user 144 | - name: Get the latest version of kubelet {{ k8s_version }} 145 | shell: apt-cache madison kubelet | grep {{ k8s_version }} | head -n1 | awk '{print $3}' 146 | register: kubelet_version 147 | 148 | - name: Print Kubelet version finded 149 | debug: 150 | msg: "{{ kubelet_version.stdout }}" 151 | 152 | # Install based on kubelet_version finded 153 | - name: Install Kubernetes binaries 154 | apt: 155 | name: "{{ packages }}" 156 | state: present 157 | update_cache: yes 158 | vars: 159 | packages: 160 | - kubelet={{ kubelet_version.stdout }} 161 | - kubeadm={{ kubelet_version.stdout }} 162 | - kubectl={{ kubelet_version.stdout }} 163 | 164 | - name: Hold k8s packages 165 | ansible.builtin.dpkg_selections: 166 | name: "{{ item }}" 167 | selection: hold 168 | loop: 169 | - kubelet 170 | - kubeadm 171 | - kubectl 172 | --------------------------------------------------------------------------------