├── .gitignore ├── .flake8 ├── scripts ├── turndown.sh ├── turnup.sh └── update_inventory.py ├── CHANGELOG.md ├── collections ├── requirements.yml └── sysengquick │ └── k3s │ ├── README.md │ ├── roles │ ├── k3sup │ │ ├── tasks │ │ │ ├── setup_kube_vip.yml │ │ │ ├── add_workers.yml │ │ │ ├── add_servers.yml │ │ │ ├── setup_kube_vip_cloud_controller.yml │ │ │ ├── add_workers_loop.yml │ │ │ ├── add_servers_loop.yml │ │ │ ├── deploy_cluster.yml │ │ │ ├── setup_kube_vip_cloud_controller_tasks.yml │ │ │ └── setup_kube_vip_tasks.yml │ │ ├── vars │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── proxmox │ │ ├── tasks │ │ │ ├── pause_for_proxmox.yml │ │ │ ├── loop_nodes.yml │ │ │ ├── remove_template.yml │ │ │ ├── remove_node.yml │ │ │ ├── snapshot.yml │ │ │ ├── start_node.yml │ │ │ ├── prep_node.yml │ │ │ ├── create_node.yml │ │ │ └── create_template.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ └── rancher │ │ ├── defaults │ │ └── main.yml │ │ └── tasks │ │ ├── deploy_cert_manager.yml │ │ └── deploy_rancher.yml │ ├── playbooks │ ├── group_vars │ │ └── all │ │ │ ├── commands.yaml │ │ │ ├── cluster.yml │ │ │ └── roles.yml │ ├── test.yml │ ├── vars │ │ └── proxmox │ │ │ ├── vault.example.yml │ │ │ └── vault.yml │ ├── prep_nodes.yml │ ├── create_template.yml │ ├── snapshot_cluster.yml │ ├── create_nodes.yml │ ├── remove_snapshot.yml │ ├── deploy_rancher.yml │ ├── create_cluster.yml │ ├── remove_cluster.yml │ ├── rollback_cluster.yml │ └── deploy_k3s.yml │ ├── plugins │ └── README.md │ ├── CHANGELOG.md │ ├── meta │ └── runtime.yml │ └── galaxy.yml ├── ansible.cfg ├── .devcontainer ├── docker-compose.yaml ├── postCreateCommand.sh ├── devcontainer.json └── Dockerfile ├── pyproject.toml ├── inventory.yaml ├── .github └── workflows │ └── build-container.yaml ├── logs ├── turndown.log └── turnup.log ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | 
-------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | per-file-ignores = */__init__.py: F401 4 | -------------------------------------------------------------------------------- /scripts/turndown.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | time ansible-playbook sysengquick.k3s.remove_cluster 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | See [collection changelog](collections/sysengquick/k3s/CHANGELOG.md) 4 | -------------------------------------------------------------------------------- /collections/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: community.general 4 | version: ">8.1.0,<9.0.0" 5 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Collection - sysengquick.k3s 2 | 3 | Documentation for the collection. 
4 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory.yaml 3 | vault_password_file = /private/ansible-vault/sysengquick-k3s 4 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/setup_kube_vip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include kube_vip tasks 3 | ansible.builtin.include_tasks: setup_kube_vip_tasks.yml 4 | when: k3sup_use_kube_vip 5 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/group_vars/all/commands.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | global_cmd: 3 | ctr: /usr/local/bin/ctr 4 | k3sup: /usr/local/bin/k3sup 5 | kubectl: /usr/local/bin/kubectl 6 | helm: /usr/sbin/helm 7 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/pause_for_proxmox.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Pause for proxmox to catch up 3 | ansible.builtin.pause: 4 | seconds: "{{ proxmox_pause_timeout | default(proxmox_timeouts.creation) }}" 5 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/add_workers.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add workers to k3s cluster 3 | ansible.builtin.include_tasks: add_workers_loop.yml 4 | loop: "{{ cluster_nodes.workers }}" 5 | loop_control: 6 | loop_var: node 7 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/add_servers.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Add servers to k3s cluster 3 | ansible.builtin.include_tasks: add_servers_loop.yml 4 | loop: "{{ cluster_nodes.servers[1:] }}" 5 | loop_control: 6 | loop_var: node 7 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test playbook to verify environment setup 3 | hosts: localhost 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Testing 8 | ansible.builtin.debug: 9 | msg: Testing 10 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/setup_kube_vip_cloud_controller.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include setup_kube_vip_cloud_controller_tasks 3 | ansible.builtin.include_tasks: setup_kube_vip_cloud_controller_tasks.yml 4 | when: k3sup_use_kube_vip_cloud_controller 5 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/loop_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Run tasks on all k3s nodes 3 | ansible.builtin.include_tasks: "{{ proxmox_loop_task }}" 4 | loop: "{{ cluster_nodes.servers + cluster_nodes.workers }}" 5 | loop_control: 6 | loop_var: node 7 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/vars/proxmox/vault.example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | vault: 3 | proxmox_api: 4 | # proxmox API token secret 5 | token_secret: NO_SECRETS_IN_GIT_PLEASE 6 | 7 | proxmox_template: 8 | # cloud init password 9 | cipassword: NO_SECRETS_IN_GIT_PLEASE 10 | 
-------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/prep_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prep nodes 3 | hosts: k3s 4 | gather_facts: false 5 | become: true 6 | 7 | tasks: 8 | - name: Prep nodes 9 | ansible.builtin.include_role: 10 | name: sysengquick.k3s.proxmox 11 | tasks_from: prep_node 12 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/create_template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create proxmox template 3 | hosts: proxmox 4 | gather_facts: false 5 | vars_files: 6 | - vars/proxmox/vault.yml 7 | 8 | tasks: 9 | - name: Create proxmox template 10 | ansible.builtin.include_role: 11 | name: sysengquick.k3s.proxmox 12 | tasks_from: create_template 13 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/rancher/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rancher_bootstrap_password: AReallyGoodP@ssw0rd 3 | rancher_cert_manager_version: v1.14.4 4 | rancher_debug: "{{ global_debug | default(true) }}" 5 | rancher_hostname: rancher.k3s.local.technoplaza.net 6 | rancher_lb_service_enable: false 7 | rancher_lb_service_name: rancher-lb 8 | rancher_replicas: 3 9 | rancher_version: 2.8.2 10 | -------------------------------------------------------------------------------- /.devcontainer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | k3s: 4 | image: ghcr.io/sysengquick-yt/k3s:1.0.2 5 | command: sleep infinity 6 | volumes: 7 | - kube:/home/vscode/.kube 8 | - bash_history:/bash_history 9 | - ${HOME}/private:/private:cached 10 | - ${HOME}/.ssh:/home/vscode/.ssh:cached 11 | - 
..:/workspace:cached 12 | 13 | volumes: 14 | kube: 15 | bash_history: 16 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/snapshot_cluster.yml: -------------------------------------------------------------------------------- 1 | - name: Snapshot cluster 2 | hosts: localhost 3 | gather_facts: false 4 | vars_files: 5 | - vars/proxmox/vault.yml 6 | 7 | tasks: 8 | - name: Snapshot all k3s nodes 9 | ansible.builtin.include_role: 10 | name: sysengquick.k3s.proxmox 11 | tasks_from: loop_nodes 12 | vars: 13 | proxmox_loop_task: snapshot.yml 14 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/create_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create proxmox nodes 3 | hosts: localhost 4 | gather_facts: false 5 | vars_files: 6 | - vars/proxmox/vault.yml 7 | 8 | tasks: 9 | - name: Create proxmox nodes 10 | ansible.builtin.include_role: 11 | name: sysengquick.k3s.proxmox 12 | tasks_from: loop_nodes 13 | vars: 14 | proxmox_loop_task: create_node.yml 15 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/remove_template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove template 3 | community.general.proxmox_kvm: 4 | api_host: "{{ proxmox_api.host }}" 5 | api_user: "{{ proxmox_api.user }}" 6 | api_token_id: "{{ proxmox_api.token_id }}" 7 | api_token_secret: "{{ proxmox_api.token_secret }}" 8 | node: "{{ proxmox_node }}" 9 | state: absent 10 | vmid: "{{ proxmox_template.vmid }}" 11 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/remove_snapshot.yml: -------------------------------------------------------------------------------- 1 | - name: Remove snapshot 2 
| hosts: localhost 3 | gather_facts: false 4 | vars_files: 5 | - vars/proxmox/vault.yml 6 | 7 | tasks: 8 | - name: Remove snapshot on all k3s nodes 9 | ansible.builtin.include_role: 10 | name: sysengquick.k3s.proxmox 11 | tasks_from: loop_nodes 12 | vars: 13 | proxmox_loop_task: snapshot.yml 14 | proxmox_snapshot_state: absent 15 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/deploy_rancher.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy rancher 3 | hosts: localhost 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Deploy cert-manager 8 | ansible.builtin.include_role: 9 | name: sysengquick.k3s.rancher 10 | tasks_from: deploy_cert_manager 11 | 12 | - name: Deploy rancher 13 | ansible.builtin.include_role: 14 | name: sysengquick.k3s.rancher 15 | tasks_from: deploy_rancher 16 | -------------------------------------------------------------------------------- /.devcontainer/postCreateCommand.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | 3 | UID=$(id -u) 4 | 5 | # fix docker created volume permissions 6 | for folder in ~/.kube /bash_history /private 7 | do 8 | test $(stat -c %u $folder) -ne $UID \ 9 | && sudo chown vscode:vscode $folder 10 | done 11 | 12 | VAULT_DIR="/private/ansible-vault" 13 | VAULT_FILE="${VAULT_DIR}/sysengquick-k3s" 14 | 15 | mkdir -p ${VAULT_DIR} 2>/dev/null \ 16 | && touch -a ${VAULT_FILE} \ 17 | && chmod 600 ${VAULT_FILE} 18 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/create_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create the k3s template 3 | ansible.builtin.import_playbook: sysengquick.k3s.create_template 4 | 5 | - name: Create the k3s nodes 6 | ansible.builtin.import_playbook: sysengquick.k3s.create_nodes 7 | 8 | - name: Prep the k3s nodes 9 | ansible.builtin.import_playbook: sysengquick.k3s.prep_nodes 10 | 11 | - name: Snapshot the cluster 12 | ansible.builtin.import_playbook: sysengquick.k3s.snapshot_cluster 13 | vars: 14 | proxmox_snapshot: base 15 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/remove_node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove k3s node 3 | community.general.proxmox_kvm: 4 | api_host: "{{ proxmox_api.host }}" 5 | api_user: "{{ proxmox_api.user }}" 6 | api_token_id: "{{ proxmox_api.token_id }}" 7 | api_token_secret: "{{ proxmox_api.token_secret }}" 8 | name: "{{ node.name }}" 9 | force: true 10 | node: "{{ proxmox_node }}" 11 | state: absent 12 | timeout: 30 13 | delay: 10 14 | register: result 15 | retries: 3 16 | until: result is not failed 17 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/remove_cluster.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove k3s nodes and template 3 | hosts: localhost 4 | gather_facts: false 5 | vars_files: 6 | - vars/proxmox/vault.yml 7 | 8 | tasks: 9 | - name: Remove k3s nodes 10 | ansible.builtin.include_role: 11 | name: sysengquick.k3s.proxmox 12 | tasks_from: loop_nodes 13 | vars: 14 | proxmox_loop_task: remove_node.yml 15 | 16 | - name: Remove k3s template 17 | ansible.builtin.include_role: 18 | name: sysengquick.k3s.proxmox 19 | tasks_from: remove_template 20 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "k3s" 3 | version = "1.0.0" 4 | description = "k3s demo repo for syseng quick" 5 | authors = ["John Ratliff "] 6 | license = "APACHE-2.0" 7 | readme = "README.md" 8 | package-mode = false 9 | 10 | [tool.poetry.dependencies] 11 | python = "^3.12" 12 | 13 | [tool.poetry.group.dev.dependencies] 14 | ansible-core = "<2.17" 15 | ansible-lint = { version = "^24.2.1", markers = 'platform_system != "Windows"' } 16 | proxmoxer = "^2.0.1" 17 | requests = "^2.31.0" 18 | 19 | [build-system] 20 | requires = ["poetry-core"] 21 | build-backend = "poetry.core.masonry.api" 22 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/add_workers_loop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add worker to k3s cluster 3 | ansible.builtin.command: 4 | cmd: >- 5 | {{ global_cmd.k3sup }} join 6 | {{ k3sup_connect_arg }} 7 | --k3s-version '{{ k3sup_k3s_version }}' 8 | {{ k3sup_server_connect_arg }} 9 | --ssh-key '{{ k3sup_ssh_key }}' 10 | --user '{{ k3sup_user }}' 11 | changed_when: true 12 | register: result 13 | 14 | - name: Display output 15 | ansible.builtin.debug: 16 | msg: 17 | cmd: "{{ 
result.cmd }}" 18 | stdout: "{{ result.stdout_lines }}" 19 | when: k3sup_debug 20 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/snapshot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check proxmox_snapshot variable 3 | ansible.builtin.assert: 4 | that: proxmox_snapshot is defined 5 | fail_msg: proxmox_snapshot is required 6 | 7 | - name: Proxmox snapshot 8 | community.general.proxmox_snap: 9 | api_host: "{{ proxmox_api.host }}" 10 | api_user: "{{ proxmox_api.user }}" 11 | api_token_id: "{{ proxmox_api.token_id }}" 12 | api_token_secret: "{{ proxmox_api.token_secret }}" 13 | hostname: "{{ node.name }}" 14 | snapname: "{{ proxmox_snapshot }}" 15 | state: "{{ proxmox_snapshot_state | default('present') }}" 16 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/rancher/tasks/deploy_cert_manager.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy cert-manager with helm 3 | ansible.builtin.command: 4 | cmd: >- 5 | {{ global_cmd.helm }} upgrade 6 | --install cert-manager jetstack/cert-manager 7 | --create-namespace 8 | --namespace cert-manager 9 | --set installCRDs=true 10 | --version {{ rancher_cert_manager_version }} 11 | changed_when: true 12 | register: result 13 | 14 | - name: Display output 15 | ansible.builtin.debug: 16 | msg: 17 | cmd: "{{ result.cmd }}" 18 | stdout: "{{ result.stdout_lines }}" 19 | when: rancher_debug 20 | -------------------------------------------------------------------------------- /scripts/turnup.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | ################################################################################ 6 | # COMMENT THIS OUT IF YOU AREN'T USING PROXMOX 7 | 8 | time ansible-playbook sysengquick.k3s.create_cluster 9 | 10 | ################################################################################ 11 | # Everything else is for k3s and rancher 12 | 13 | time ansible-playbook sysengquick.k3s.deploy_k3s 14 | time ansible-playbook sysengquick.k3s.deploy_rancher 15 | 16 | time kubectl -n cattle-system rollout status deploy/rancher 17 | 18 | kubectl get nodes -o wide 19 | kubectl get pods --all-namespaces -o wide 20 | kubectl -n cattle-system get deploy rancher 21 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/add_servers_loop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add server to k3s cluster 3 | ansible.builtin.command: 4 | cmd: >- 5 | {{ global_cmd.k3sup }} join 6 | {{ k3sup_connect_arg }} 7 | --k3s-extra-args '{{ k3sup_extra_args_str }}' 8 | --k3s-version '{{ k3sup_k3s_version }}' 9 | --server 10 | {{ k3sup_server_connect_arg }} 11 | --ssh-key '{{ k3sup_ssh_key }}' 12 | {{ k3sup_tls_san_arg }} 13 | --user '{{ k3sup_user }}' 14 | changed_when: true 15 | register: result 16 | 17 | - name: Display output 18 | ansible.builtin.debug: 19 | msg: 20 | cmd: "{{ result.cmd }}" 21 | stdout: "{{ result.stdout_lines }}" 22 | when: k3sup_debug 23 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/start_node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Assert variable definitions 3 | ansible.builtin.assert: 4 | that: proxmox_node_name is defined or proxmox_node_vmid is defined 5 | fail_msg: One of proxmox_node_name/proxmox_node_vmid is required 6 | 7 | - name: Pause for 
proxmox 8 | ansible.builtin.include_tasks: pause_for_proxmox.yml 9 | 10 | - name: Start node 11 | community.general.proxmox_kvm: 12 | api_host: "{{ proxmox_api.host }}" 13 | api_user: "{{ proxmox_api.user }}" 14 | api_token_id: "{{ proxmox_api.token_id }}" 15 | api_token_secret: "{{ proxmox_api.token_secret }}" 16 | name: "{{ proxmox_node_name | default(omit) }}" 17 | state: started 18 | vmid: "{{ proxmox_node_vmid | default(omit) }}" 19 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/deploy_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy cluster 3 | ansible.builtin.command: 4 | cmd: >- 5 | {{ global_cmd.k3sup }} install --cluster 6 | --context '{{ k3sup_context }}' 7 | {{ k3sup_connect_arg }} 8 | --k3s-extra-args '{{ k3sup_extra_args_str }}' 9 | --k3s-version '{{ k3sup_k3s_version }}' 10 | --local-path '{{ k3sup_local_path }}' 11 | --ssh-key '{{ k3sup_ssh_key }}' 12 | {{ k3sup_tls_san_arg }} 13 | --user '{{ k3sup_user }}' 14 | changed_when: true 15 | register: result 16 | vars: 17 | node: "{{ cluster_nodes.servers[0] }}" 18 | 19 | - name: Display output 20 | ansible.builtin.debug: 21 | msg: 22 | cmd: "{{ result.cmd }}" 23 | stdout: "{{ result.stdout_lines }}" 24 | when: k3sup_debug 25 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/vars/proxmox/vault.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 33356530376562363161333564666161343432326262353466363064306434313465383434353663 3 | 6235636638613832353832626232363730383734306662640a626364333733373836656536373865 4 | 38656161353964356362386230393234636133353531643730303938323537623334356264343830 5 | 6363306436616664650a663665376535383430323132383766626332663866393961643532623464 6 | 
62316337663838613330646533633835633466623562356630336664343566343935376561623564 7 | 64666166643664623239363738306230376536643061396237343264303162653661663464646136 8 | 63396332373335613364396536313132326563643035633830386432333437623839633034356437 9 | 37323538623532323834396331316636386330366639393539393034383566636436363165666434 10 | 63393762353335353662663461336164353161336666346236636162636332653164393435653737 11 | 3062393962663931376261376537643130336166393733316435 12 | -------------------------------------------------------------------------------- /inventory.yaml: -------------------------------------------------------------------------------- 1 | all: 2 | children: 3 | k3s: 4 | children: 5 | servers: 6 | hosts: 7 | s1.k3s.local.technoplaza.net: 8 | ansible_host: 192.168.1.51 9 | s2.k3s.local.technoplaza.net: 10 | ansible_host: 192.168.1.52 11 | s3.k3s.local.technoplaza.net: 12 | ansible_host: 192.168.1.53 13 | workers: 14 | hosts: 15 | w1.k3s.local.technoplaza.net: 16 | ansible_host: 192.168.1.54 17 | w2.k3s.local.technoplaza.net: 18 | ansible_host: 192.168.1.55 19 | vars: 20 | ansible_ssh_user: pve 21 | proxmox: 22 | hosts: 23 | pve.local.technoplaza.net: {} 24 | vars: 25 | ansible_ssh_user: ansible 26 | vars: 27 | ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null 28 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/group_vars/all/cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # cluster nodes 3 | cluster_nodes: 4 | servers: 5 | - name: s1.k3s.local.technoplaza.net 6 | ip: 192.168.1.51 7 | - name: s2.k3s.local.technoplaza.net 8 | ip: 192.168.1.52 9 | - name: s3.k3s.local.technoplaza.net 10 | ip: 192.168.1.53 11 | workers: 12 | - name: w1.k3s.local.technoplaza.net 13 | ip: 192.168.1.54 14 | - name: w2.k3s.local.technoplaza.net 15 | ip: 192.168.1.55 16 | 17 | # ssh user on k3s nodes with 
passwordless sudo 18 | node_ssh_user: pve 19 | 20 | # 21 | # These values don't matter if you aren't using proxmox 22 | # 23 | 24 | # k3s cluster network configuration 25 | proxmox_network: 26 | cidr: 24 27 | gateway: 192.168.1.1 28 | 29 | # proxmox server hostname/IP 30 | proxmox_server: pve.local.technoplaza.net 31 | 32 | # ssh user with sudo on proxmox 33 | proxmox_ssh_user: ansible 34 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/prep_node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Wait for server to startup 3 | ansible.builtin.wait_for_connection: 4 | timeout: "{{ proxmox_timeouts.startup }}" 5 | 6 | # 7 | # PROBLEM: server may not be fully up, even if wait_for_connection returns 8 | # SOLUTION: retry the first task a couple times with a short delay 9 | # 10 | - name: Install packages 11 | ansible.builtin.package: 12 | name: 13 | - iptables 14 | - qemu-guest-agent 15 | state: present 16 | register: result 17 | retries: 3 18 | delay: "{{ proxmox_timeouts.creation }}" 19 | until: result is not failed 20 | 21 | - name: Start qemu-guest-agent 22 | ansible.builtin.service: 23 | name: qemu-guest-agent 24 | state: started 25 | 26 | - name: Load installed packages 27 | ansible.builtin.package_facts: 28 | 29 | - name: Stop and disable ufw 30 | community.general.ufw: 31 | state: disabled 32 | when: "'ufw' in ansible_facts.packages" 33 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/rollback_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Rollback cluster 3 | hosts: localhost 4 | gather_facts: false 5 | vars_files: 6 | - vars/proxmox/vault.yml 7 | 8 | tasks: 9 | - name: Rollback snapshot on all k3s nodes 10 | ansible.builtin.include_role: 11 | name: sysengquick.k3s.proxmox 12 | 
tasks_from: loop_nodes 13 | vars: 14 | proxmox_loop_task: snapshot.yml 15 | proxmox_snapshot_state: rollback 16 | 17 | - name: Pause for proxmox 18 | ansible.builtin.include_role: 19 | name: sysengquick.k3s.proxmox 20 | tasks_from: pause_for_proxmox 21 | vars: 22 | proxmox_pause_timeout: 15 23 | 24 | - name: Restart hosts 25 | ansible.builtin.include_role: 26 | name: sysengquick.k3s.proxmox 27 | tasks_from: loop_nodes 28 | vars: 29 | proxmox_node_name: "{{ node.name }}" 30 | proxmox_loop_task: start_node.yml 31 | proxmox_pause_timeout: 1 32 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/plugins/README.md: -------------------------------------------------------------------------------- 1 | # Collections Plugins Directory 2 | 3 | This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that 4 | is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that 5 | would contain module utils and modules respectively. 6 | 7 | Here is an example directory of the majority of plugins currently supported by Ansible: 8 | 9 | ``` 10 | └── plugins 11 | ├── action 12 | ├── become 13 | ├── cache 14 | ├── callback 15 | ├── cliconf 16 | ├── connection 17 | ├── filter 18 | ├── httpapi 19 | ├── inventory 20 | ├── lookup 21 | ├── module_utils 22 | ├── modules 23 | ├── netconf 24 | ├── shell 25 | ├── strategy 26 | ├── terminal 27 | ├── test 28 | └── vars 29 | ``` 30 | 31 | A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible-core/2.16/plugins/plugins.html). 
32 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | proxmox_api: 3 | host: "{{ proxmox_server }}" 4 | token_id: ansible 5 | token_secret: "{{ vault.proxmox_api.token_secret }}" 6 | user: root@pam 7 | 8 | proxmox_image_delete: false 9 | proxmox_image_dest: "/tmp/{{ proxmox_image.name }}" 10 | 11 | proxmox_node: pve 12 | 13 | proxmox_template: 14 | block_storage: false 15 | bridge: vmbr0 16 | ciuser: "{{ node_ssh_user }}" 17 | cipassword: "{{ vault.proxmox_template.cipassword }}" 18 | cores: 4 19 | disk_format: qcow2 20 | image: jammy_minimal 21 | memory: 4096 22 | name: ansible-k3s-cloud-template 23 | size: 10G 24 | ssd: true 25 | sshkeys: | 26 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDaZmy03rApaPZNuN1I8sfmVM/vLv+WD8c2smYyBLJJe sysengquick@wsl2 27 | storage: local 28 | vmid: 2500 29 | 30 | proxmox_timeouts: 31 | api: 120 32 | creation: 15 33 | startup: 300 34 | 35 | # use become plugin for sudo access to qm disk import command 36 | # see notes in create_templae.yml for more information 37 | proxmox_use_become: false 38 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## 1.0.2 - 2024-04-28 4 | 5 | - add README 6 | - cleanup post create command 7 | - add alma linux 8 cloud image 8 | - template the command paths 9 | - update defaults 10 | - update devcontainer config 11 | - fix typo in turnup script 12 | 13 | ## 1.0.1 - 2024-03-22 14 | 15 | - update ansible-lint 16 | - remove nested ansible_collections directory 17 | - move Dockerfile work directory to /app 18 | - narrow the version of poetry installed in Dockerfile 19 | - move Dockerfile OCI labels to top 20 | - bump docker-compose image version from ghcr 
21 | - remove k3s_cp_server inventory group 22 | - fix deploy_k3s play names 23 | - add global_debug flag to override k3sup_debug and rancher_debug 24 | - rename cluster_network to proxmox_network 25 | - template the k3sup local-path argument in k3sup defaults 26 | - add playbook log output 27 | 28 | ## 1.0.0 - 2024-03-20 29 | 30 | - initial release 31 | - creates proxmox virtual machines 32 | - installs k3s with k3sup on all cluster nodes 33 | - deploys kube-vip and kube-vip cloud controller 34 | - deploys rancher 35 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/setup_kube_vip_cloud_controller_tasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Apply kube-vip cloud controller manifest 3 | ansible.builtin.command: 4 | cmd: "{{ global_cmd.kubectl }} apply -f {{ k3sup_kube_vip_manifests.cloud_controller }}" 5 | changed_when: true 6 | register: result 7 | 8 | - name: Display output 9 | ansible.builtin.debug: 10 | msg: 11 | cmd: "{{ result.cmd }}" 12 | stdout: "{{ result.stdout_lines }}" 13 | when: k3sup_debug 14 | 15 | - name: Apply IP Range to cloud controller 16 | ansible.builtin.shell: 17 | cmd: >- 18 | set -o pipefail && 19 | {{ global_cmd.kubectl }} create configmap 20 | -n kube-system 21 | kubevip 22 | -o yaml 23 | --dry-run=client 24 | --from-literal range-global={{ k3sup_ip_range.start }}-{{ k3sup_ip_range.end }} 25 | | {{ global_cmd.kubectl }} apply -f - 26 | executable: /bin/bash 27 | changed_when: true 28 | register: result 29 | 30 | - name: Display output 31 | ansible.builtin.debug: 32 | msg: 33 | cmd: "{{ result.cmd }}" 34 | stdout: "{{ result.stdout_lines }}" 35 | when: k3sup_debug 36 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/deploy_k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - 
name: Deploy k3s 3 | hosts: localhost 4 | gather_facts: false 5 | 6 | tasks: 7 | - name: Deploy k3s 8 | ansible.builtin.include_role: 9 | name: sysengquick.k3s.k3sup 10 | tasks_from: deploy_cluster 11 | 12 | - name: Setup kube-vip 13 | hosts: "{{ hostvars['localhost']['cluster_nodes']['servers'][0]['name'] }}" 14 | become: true 15 | gather_facts: false 16 | 17 | tasks: 18 | - name: Setup kube-vip 19 | ansible.builtin.include_role: 20 | name: sysengquick.k3s.k3sup 21 | tasks_from: setup_kube_vip 22 | 23 | - name: Setup kube-vip cloud controller 24 | ansible.builtin.include_role: 25 | name: sysengquick.k3s.k3sup 26 | tasks_from: setup_kube_vip_cloud_controller 27 | 28 | - name: Add additional nodes to k3s cluster 29 | hosts: localhost 30 | gather_facts: false 31 | 32 | tasks: 33 | - name: Add additional servers to cluster 34 | ansible.builtin.include_role: 35 | name: sysengquick.k3s.k3sup 36 | tasks_from: add_servers 37 | 38 | - name: Add workers to cluster 39 | ansible.builtin.include_role: 40 | name: sysengquick.k3s.k3sup 41 | tasks_from: add_workers 42 | -------------------------------------------------------------------------------- /.github/workflows/build-container.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Container Image 3 | 4 | on: 5 | push: 6 | tags: ["*"] 7 | workflow_dispatch: 8 | 9 | env: 10 | REGISTRY: ghcr.io 11 | IMAGE_NAME: ${{ github.repository }} 12 | 13 | jobs: 14 | build-and-push-image: 15 | runs-on: ubuntu-latest 16 | 17 | permissions: 18 | contents: read 19 | packages: write 20 | 21 | steps: 22 | - name: Checkout repo 23 | uses: actions/checkout@v4 24 | 25 | - name: Login to container registry 26 | uses: docker/login-action@v3 27 | with: 28 | registry: ${{ env.REGISTRY }} 29 | username: ${{ github.actor }} 30 | password: ${{ secrets.GITHUB_TOKEN }} 31 | 32 | - name: Extract metadata 33 | id: metadata 34 | uses: docker/metadata-action@v5 35 | with: 36 | images: ${{ env.REGISTRY 
}}/${{ env.IMAGE_NAME }} 37 | tags: | 38 | type=ref,event=branch 39 | type=ref,event=tag 40 | type=sha 41 | 42 | - name: Build/push container image 43 | uses: docker/build-push-action@v5 44 | with: 45 | push: true 46 | file: .devcontainer/Dockerfile 47 | labels: ${{ steps.metadata.outputs.labels }} 48 | tags: ${{ steps.metadata.outputs.tags }} 49 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | k3sup_extra_args_str: >- 3 | {{ 4 | ( 5 | k3sup_extra_args 6 | + k3sup_k3s_disable_servicelb 7 | + k3sup_k3s_server_noschedule_taint 8 | ) 9 | | join(' ') 10 | }} 11 | k3sup_k3s_disable_servicelb: >- 12 | {{ 13 | ['--disable servicelb'] 14 | if (k3sup_use_kube_vip or k3sup_use_kube_vip_cloud_controller) else [] 15 | }} 16 | k3sup_k3s_server_noschedule_taint: >- 17 | {{ 18 | ['--node-taint node-role.kubernetes.io/control-plane:NoSchedule'] 19 | if k3sup_k3s_server_noschedule else [] 20 | }} 21 | k3sup_kube_vip_image: "ghcr.io/kube-vip/kube-vip:{{ k3sup_kube_vip_version }}" 22 | k3sup_kube_vip_manifests: 23 | rbac: https://kube-vip.io/manifests/rbac.yaml 24 | cloud_controller: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml 25 | k3sup_connect_arg: >- 26 | {{ 27 | ('--ip ' ~ node.ip) if k3sup_prefer_ip 28 | else ('--host ' ~ node.name) 29 | }} 30 | k3sup_server_connect_arg: >- 31 | {{ 32 | ('--server-ip ' ~ cluster_nodes.servers[0].ip) if k3sup_prefer_ip 33 | else ('--server-host ' ~ cluster_nodes.servers[0].name) 34 | }} 35 | k3sup_tls_san_arg: >- 36 | {{ 37 | ('--tls-san ' ~ k3sup_vip) if k3sup_use_kube_vip else '' 38 | }} 39 | k3sup_user: "{{ node_ssh_user }}" 40 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/create_node.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Create k3s node from template 3 | community.general.proxmox_kvm: 4 | api_host: "{{ proxmox_api.host }}" 5 | api_user: "{{ proxmox_api.user }}" 6 | api_token_id: "{{ proxmox_api.token_id }}" 7 | api_token_secret: "{{ proxmox_api.token_secret }}" 8 | name: "{{ node.name }}" 9 | node: "{{ proxmox_node }}" 10 | clone: "{{ proxmox_template.name }}" 11 | format: "{{ proxmox_disk_format }}" 12 | full: true 13 | storage: "{{ proxmox_template.storage }}" 14 | timeout: "{{ proxmox_timeouts.api }}" 15 | vmid: "{{ proxmox_template.vmid }}" 16 | register: result 17 | 18 | - name: Register vmid 19 | ansible.builtin.set_fact: 20 | proxmox_node_vmid: "{{ result.vmid }}" 21 | 22 | - name: Update IP configuration 23 | community.general.proxmox_kvm: 24 | api_host: "{{ proxmox_api.host }}" 25 | api_user: "{{ proxmox_api.user }}" 26 | api_token_id: "{{ proxmox_api.token_id }}" 27 | api_token_secret: "{{ proxmox_api.token_secret }}" 28 | ipconfig: 29 | ipconfig0: "ip={{ node.ip }}/{{ proxmox_network.cidr }},gw={{ proxmox_network.gateway }}" 30 | node: "{{ proxmox_node }}" 31 | state: present 32 | update: true 33 | vmid: "{{ proxmox_node_vmid }}" 34 | 35 | - name: Start node 36 | ansible.builtin.include_tasks: start_node.yml 37 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/rancher/tasks/deploy_rancher.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy rancher with helm 3 | ansible.builtin.command: 4 | cmd: >- 5 | {{ global_cmd.helm }} upgrade 6 | --install rancher rancher-stable/rancher 7 | --create-namespace 8 | --namespace cattle-system 9 | --set bootstrapPassword={{ rancher_bootstrap_password }} 10 | --set hostname={{ rancher_hostname }} 11 | --set replicas={{ rancher_replicas }} 12 | --version {{ rancher_version }} 13 | changed_when: true 14 | register: 
result 15 | 16 | - name: Display output 17 | ansible.builtin.debug: 18 | msg: 19 | cmd: "{{ result.cmd }}" 20 | stdout: "{{ result.stdout_lines }}" 21 | when: rancher_debug 22 | 23 | - name: Add load balancer service for rancher 24 | when: rancher_lb_service_enable 25 | block: 26 | - name: Expose rancher load balancer service 27 | ansible.builtin.command: 28 | cmd: >- 29 | {{ global_cmd.kubectl }} expose deployment rancher 30 | --name={{ rancher_lb_service_name }} 31 | --port=443 32 | -n cattle-system 33 | --type=LoadBalancer 34 | changed_when: true 35 | register: result 36 | 37 | - name: Display output 38 | ansible.builtin.debug: 39 | msg: 40 | cmd: "{{ result.cmd }}" 41 | stdout: "{{ result.stdout_lines }}" 42 | when: rancher_debug 43 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sysengquick-k3s", 3 | "dockerComposeFile": "docker-compose.yaml", 4 | "service": "k3s", 5 | "workspaceFolder": "/workspace", 6 | "postCreateCommand": ".devcontainer/postCreateCommand.sh", 7 | 8 | "customizations": { 9 | "vscode": { 10 | "extensions": [ 11 | "bierner.markdown-preview-github-styles", 12 | "DavidAnson.vscode-markdownlint", 13 | "dhoeric.ansible-vault", 14 | "eamodio.gitlens", 15 | "esbenp.prettier-vscode", 16 | "mhutchie.git-graph", 17 | "ms-python.black-formatter", 18 | "ms-python.flake8", 19 | "oderwat.indent-rainbow", 20 | "redhat.ansible", 21 | "tamasfe.even-better-toml", 22 | "yzhang.markdown-all-in-one" 23 | ], 24 | "settings": { 25 | "[json]": { 26 | "editor.defaultFormatter": "esbenp.prettier-vscode" 27 | }, 28 | "[jsonc]": { 29 | "editor.defaultFormatter": "esbenp.prettier-vscode" 30 | }, 31 | "[markdown]": { 32 | "editor.defaultFormatter": "esbenp.prettier-vscode" 33 | }, 34 | "ansible.python.interpreterPath": "/usr/local/bin/python", 35 | "editor.formatOnSave": true, 36 | "files.associations": { 
37 | "*.yml": "ansible" 38 | }, 39 | "files.trimFinalNewlines": true, 40 | "files.trimTrailingWhitespace": true 41 | } 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/playbooks/group_vars/all/roles.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ################################################################################ 3 | # Role overrides 4 | # 5 | # Role defaults WILL need some update 6 | # 7 | # I have listed several keys I think are likely to be different for you 8 | # This list is not meant to be exhaustive 9 | # Please read the defaults and update them accordingly 10 | # 11 | # This file is only here for documentation purposes 12 | # I do not recommend adding these values to this file 13 | # 14 | ################################################################################ 15 | # collections/ansible_collections/sysengquick/k3s/roles/k3sup/defaults/main.yml 16 | # 17 | # k3sup_ip_range 18 | # The kube-vip cloud controller IP range 19 | # k3sup_vip 20 | # The kube-vip virtual IP for the k3s API 21 | # 22 | ################################################################################ 23 | # collections/ansible_collections/sysengquick/k3s/roles/proxmox/defaults/main.yml 24 | # 25 | # proxmox_node 26 | # proxmox node name in your cluster 27 | # 28 | # proxmox_template.sshkeys 29 | # ssh key for the cloud-init template 30 | # proxmox_template.storage 31 | # proxmox storage ID for the disks 32 | # 33 | ################################################################################ 34 | # collections/ansible_collections/sysengquick/k3s/roles/rancher/defaults/main.yml 35 | # 36 | # rancher_hostname 37 | # rancher FQDN 38 | ################################################################################ 39 | -------------------------------------------------------------------------------- 
/collections/sysengquick/k3s/roles/k3sup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # kubeconfig context -- mostly cosmetic 3 | k3sup_context: k3s-ha 4 | 5 | # display CLI command output 6 | k3sup_debug: "{{ global_debug | default(true) }}" 7 | 8 | # additional arguments to pass to k3s on server nodes 9 | # e.g. --disable traefik 10 | k3sup_extra_args: [] 11 | 12 | # the default interface on the cluster nodes 13 | k3sup_iface: eth0 14 | 15 | # range of IPs kube-vip cloud controller will assign 16 | k3sup_ip_range: 17 | start: 192.168.1.60 18 | end: 192.168.1.69 19 | 20 | # rancher 2.8 cannot run on 1.28 as of 2024-03-11 21 | k3sup_k3s_version: v1.27.11+k3s1 22 | 23 | # when true, prevent workloads from being scheduled on control plane nodes 24 | k3sup_k3s_server_noschedule: false 25 | 26 | # kube-vip lease parameters 27 | k3sup_kube_vip_lease_duration: 30 # default 5 28 | k3sup_kube_vip_lease_renew_duration: 10 # default 3 29 | k3sup_kube_vip_lease_retry: 3 # default 1 30 | 31 | k3sup_kube_vip_version: v0.7.1 32 | 33 | # where to place the kubeconfig file after installation 34 | k3sup_local_path: "{{ lookup('ansible.builtin.env', 'HOME') }}/.kube/config" 35 | 36 | # connect and setup cluster with IP addresses rather than hostnames 37 | k3sup_prefer_ip: true 38 | 39 | # ssh key k3sup will use to connect to the nodes 40 | k3sup_ssh_key: "{{ lookup('ansible.builtin.env', 'HOME') }}/.ssh/id_ed25519" 41 | 42 | # enable/disable installation of kube-vip / kube-vip cloud controller 43 | k3sup_use_kube_vip: true 44 | k3sup_use_kube_vip_cloud_controller: true 45 | 46 | # VIP used by kube-vip for the control plane nodes 47 | k3sup_vip: 192.168.1.50 48 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/meta/runtime.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Collections must specify a minimum 
required ansible version to upload 3 | # to galaxy 4 | # requires_ansible: '>=2.9.10' 5 | requires_ansible: ">=2.16.0" 6 | # Content that Ansible needs to load from another location or that has 7 | # been deprecated/removed 8 | # plugin_routing: 9 | # action: 10 | # redirected_plugin_name: 11 | # redirect: ns.col.new_location 12 | # deprecated_plugin_name: 13 | # deprecation: 14 | # removal_version: "4.0.0" 15 | # warning_text: | 16 | # See the porting guide on how to update your playbook to 17 | # use ns.col.another_plugin instead. 18 | # removed_plugin_name: 19 | # tombstone: 20 | # removal_version: "2.0.0" 21 | # warning_text: | 22 | # See the porting guide on how to update your playbook to 23 | # use ns.col.another_plugin instead. 24 | # become: 25 | # cache: 26 | # callback: 27 | # cliconf: 28 | # connection: 29 | # doc_fragments: 30 | # filter: 31 | # httpapi: 32 | # inventory: 33 | # lookup: 34 | # module_utils: 35 | # modules: 36 | # netconf: 37 | # shell: 38 | # strategy: 39 | # terminal: 40 | # test: 41 | # vars: 42 | 43 | # Python import statements that Ansible needs to load from another location 44 | # import_redirection: 45 | # ansible_collections.ns.col.plugins.module_utils.old_location: 46 | # redirect: ansible_collections.ns.col.plugins.module_utils.new_location 47 | 48 | # Groups of actions/modules that take a common set of options 49 | # action_groups: 50 | # group_name: 51 | # - module1 52 | # - module2 53 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | proxmox_disk_format: "{{ 'raw' if proxmox_template.block_storage else proxmox_template.disk_format }}" 3 | proxmox_image: "{{ proxmox_images[proxmox_template.image] }}" 4 | proxmox_image_location: "{{ proxmox_template.storage }}:\ 5 | {{ proxmox_image_location_prefix }}\ 6 | vm-{{ proxmox_template.vmid }}-disk-0{{ 
proxmox_image_location_ext }}" 7 | proxmox_image_location_ext: "{{ '' if proxmox_template.block_storage else ('.' ~ proxmox_disk_format) }}" 8 | proxmox_image_location_prefix: "{{ '' if proxmox_template.block_storage else (proxmox_template.vmid ~ '/') }}" 9 | proxmox_image_url: 10 | image: "{{ proxmox_image.base ~ proxmox_image.name }}" 11 | digest: "{{ proxmox_image.base ~ proxmox_image.digest }}" 12 | proxmox_images: 13 | alma_linux_8: 14 | base: https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/ 15 | name: AlmaLinux-8-GenericCloud-latest.x86_64.qcow2 16 | digest: CHECKSUM 17 | method: sha256 18 | alma_linux_9: 19 | base: https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/ 20 | name: AlmaLinux-9-GenericCloud-latest.x86_64.qcow2 21 | digest: CHECKSUM 22 | method: sha256 23 | debian_12: &bookworm 24 | base: https://cloud.debian.org/images/cloud/bookworm/latest/ 25 | name: debian-12-genericcloud-amd64.qcow2 26 | digest: SHA512SUMS 27 | method: sha512 28 | bookworm: *bookworm 29 | ubuntu_2204: &jammy 30 | base: https://cloud-images.ubuntu.com/jammy/current/ 31 | name: jammy-server-cloudimg-amd64-disk-kvm.img 32 | digest: SHA256SUMS 33 | method: sha256 34 | ubuntu_2204_minimal: &jammy_minimal 35 | base: https://cloud-images.ubuntu.com/minimal/releases/jammy/release/ 36 | name: ubuntu-22.04-minimal-cloudimg-amd64.img 37 | digest: SHA256SUMS 38 | method: sha256 39 | jammy: *jammy 40 | jammy_minimal: *jammy_minimal 41 | proxmox_qm_disk_import: >- 42 | qm disk import 43 | {{ proxmox_template.vmid }} 44 | {{ proxmox_image_dest }} 45 | {{ proxmox_template.storage }} 46 | --format {{ proxmox_disk_format }} 47 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/k3sup/tasks/setup_kube_vip_tasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Apply kube-vip RBAC manifest 3 | ansible.builtin.command: 4 | cmd: "{{ global_cmd.kubectl }} apply 
-f {{ k3sup_kube_vip_manifests.rbac }}" 5 | changed_when: true 6 | register: result 7 | 8 | - name: Display output 9 | ansible.builtin.debug: 10 | msg: 11 | cmd: "{{ result.cmd }}" 12 | stdout: "{{ result.stdout_lines }}" 13 | when: k3sup_debug 14 | 15 | - name: Fetch kube-vip container image 16 | ansible.builtin.command: 17 | cmd: "{{ global_cmd.ctr }} image pull {{ k3sup_kube_vip_image }}" 18 | changed_when: true 19 | register: result 20 | 21 | - name: Display output 22 | ansible.builtin.debug: 23 | msg: 24 | cmd: "{{ result.cmd }}" 25 | stdout: "{{ result.stdout_lines }}" 26 | when: k3sup_debug 27 | 28 | - name: Apply kube-vip daemonset manifest 29 | ansible.builtin.shell: 30 | cmd: >- 31 | set -o pipefail && 32 | {{ global_cmd.ctr }} run --rm --net-host {{ k3sup_kube_vip_image }} vip /kube-vip 33 | manifest daemonset 34 | --address {{ k3sup_vip }} 35 | --arp 36 | --controlplane 37 | --inCluster 38 | --interface {{ k3sup_iface }} 39 | --leaderElection 40 | --leaseDuration {{ k3sup_kube_vip_lease_duration }} 41 | --leaseRenewDuration {{ k3sup_kube_vip_lease_renew_duration }} 42 | --leaseRetry {{ k3sup_kube_vip_lease_retry }} 43 | --services 44 | --taint 45 | | tee /var/lib/rancher/k3s/server/manifests/kube-vip.yaml 46 | executable: /bin/bash 47 | changed_when: true 48 | register: result 49 | 50 | - name: Display output 51 | ansible.builtin.debug: 52 | msg: 53 | cmd: "{{ result.cmd }}" 54 | stdout: "{{ result.stdout_lines }}" 55 | when: k3sup_debug 56 | 57 | - name: Update kubectl config 58 | ansible.builtin.lineinfile: 59 | path: "{{ lookup('ansible.builtin.env', 'HOME') }}/.kube/config" 60 | regexp: '(\s+server:) https://' 61 | line: '\1 https://{{ k3sup_vip }}:6443' 62 | backrefs: true 63 | delegate_to: localhost 64 | -------------------------------------------------------------------------------- /scripts/update_inventory.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | 4 | 
_DEFAULT_PROXMOX_SERVER: str = "proxmox.technoplaza.net" 5 | _DEFAULT_PROXMOX_SSH_USER: str = "ansible" 6 | 7 | 8 | def main(): 9 | path_prefix: str = ".." if os.getcwd().endswith("/scripts") else "." 10 | 11 | config_path = f"{path_prefix}/collections/sysengquick/k3s/playbooks/group_vars/all/cluster.yml" 12 | 13 | with open(config_path, "r") as f: 14 | config: dict = yaml.safe_load(f) 15 | 16 | inventory = { 17 | "all": { 18 | "children": { 19 | "proxmox": { 20 | "hosts": { 21 | config.get("proxmox_server", _DEFAULT_PROXMOX_SERVER): {}, 22 | }, 23 | "vars": { 24 | "ansible_ssh_user": config.get( 25 | "proxmox_ssh_user", _DEFAULT_PROXMOX_SSH_USER 26 | ), 27 | }, 28 | }, 29 | "k3s": { 30 | "children": { 31 | "servers": { 32 | "hosts": {}, 33 | }, 34 | "workers": {"hosts": {}}, 35 | }, 36 | "vars": { 37 | "ansible_ssh_user": config["node_ssh_user"], 38 | }, 39 | }, 40 | }, 41 | "vars": { 42 | "ansible_ssh_common_args": "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", 43 | }, 44 | } 45 | } 46 | 47 | inventory["all"]["children"]["k3s"]["children"]["servers"]["hosts"] = { 48 | server["name"]: {"ansible_host": server["ip"]} 49 | for server in config["cluster_nodes"]["servers"] 50 | } 51 | 52 | inventory["all"]["children"]["k3s"]["children"]["workers"]["hosts"] = { 53 | worker["name"]: {"ansible_host": worker["ip"]} 54 | for worker in config["cluster_nodes"]["workers"] 55 | } 56 | 57 | inventory_path = f"{path_prefix}/inventory.yaml" 58 | 59 | with open(inventory_path, "w") as f: 60 | yaml.safe_dump(inventory, f) 61 | 62 | 63 | if __name__ == "__main__": 64 | main() 65 | -------------------------------------------------------------------------------- /logs/turndown.log: -------------------------------------------------------------------------------- 1 | 2 | PLAY [Remove k3s nodes and template] ******************************************* 3 | 4 | TASK [Remove k3s nodes] ******************************************************** 5 | 6 | TASK 
[sysengquick.k3s.proxmox : Run tasks on all k3s nodes] ******************** 7 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/remove_node.yml for localhost => (item={'name': 's1.k3s.local.technoplaza.net', 'ip': '192.168.1.101'}) 8 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/remove_node.yml for localhost => (item={'name': 's2.k3s.local.technoplaza.net', 'ip': '192.168.1.102'}) 9 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/remove_node.yml for localhost => (item={'name': 's3.k3s.local.technoplaza.net', 'ip': '192.168.1.103'}) 10 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/remove_node.yml for localhost => (item={'name': 'w1.k3s.local.technoplaza.net', 'ip': '192.168.1.111'}) 11 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/remove_node.yml for localhost => (item={'name': 'w2.k3s.local.technoplaza.net', 'ip': '192.168.1.112'}) 12 | 13 | TASK [sysengquick.k3s.proxmox : Remove k3s node] ******************************* 14 | changed: [localhost] 15 | 16 | TASK [sysengquick.k3s.proxmox : Remove k3s node] ******************************* 17 | changed: [localhost] 18 | 19 | TASK [sysengquick.k3s.proxmox : Remove k3s node] ******************************* 20 | changed: [localhost] 21 | 22 | TASK [sysengquick.k3s.proxmox : Remove k3s node] ******************************* 23 | changed: [localhost] 24 | 25 | TASK [sysengquick.k3s.proxmox : Remove k3s node] ******************************* 26 | changed: [localhost] 27 | 28 | TASK [Remove k3s template] ***************************************************** 29 | 30 | TASK [sysengquick.k3s.proxmox : Remove template] ******************************* 31 | changed: [localhost] 32 | 33 | PLAY RECAP 
********************************************************************* 34 | localhost : ok=11 changed=6 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 35 | 36 | 37 | real 0m37.363s 38 | user 0m4.957s 39 | sys 0m0.476s 40 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/devcontainers/python:1-3.12-bullseye 2 | 3 | # OCI labels 4 | LABEL org.opencontainers.image.source=https://github.com/sysengquick/k3s 5 | LABEL org.opencontainers.image.description="devcontainer image for building sysenquick k3s cluster" 6 | LABEL org.opencontainers.image.licenses=Apache-2.0 7 | 8 | # install poetry 9 | RUN python3 -m pip install poetry~=1.8.2 10 | 11 | # install poetry dependencies 12 | WORKDIR /app 13 | 14 | COPY poetry.lock pyproject.toml /app 15 | RUN poetry config virtualenvs.create false && poetry install 16 | 17 | # install collection requirements 18 | COPY collections/requirements.yml /app 19 | RUN su vscode -c "ansible-galaxy collection install -r requirements.yml" 20 | RUN su vscode -c "ln -s /workspace/collections/sysengquick ~/.ansible/collections/ansible_collections/" 21 | 22 | # enable git bash completion and preserve bash history 23 | RUN su vscode -c "echo 'source /usr/share/bash-completion/completions/git' >> ~/.bashrc" 24 | RUN su vscode -c "echo 'export HISTFILE=/bash_history/history.txt' >> ~/.bashrc" 25 | 26 | # install iputils-ping and dnsutils 27 | RUN apt-get update && apt-get install -y iputils-ping dnsutils 28 | 29 | # install k3sup 30 | ARG K3SUP_VERSION=0.13.5 31 | RUN curl -sLS \ 32 | https://github.com/alexellis/k3sup/releases/download/${K3SUP_VERSION}/k3sup \ 33 | -o /usr/local/bin/k3sup \ 34 | && chmod 755 /usr/local/bin/k3sup 35 | 36 | # install kubectl 37 | ARG KUBECTL_VERSION=v1.27 38 | RUN mkdir -p -m 755 /etc/apt/keyrings 39 | RUN curl -fsSL 
https://pkgs.k8s.io/core:/stable:/${KUBECTL_VERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg 40 | RUN echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/'${KUBECTL_VERSION}'/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list 41 | RUN apt-get update && apt-get install -y kubectl 42 | 43 | # install helm 44 | RUN curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null 45 | RUN echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 46 | RUN apt-get update && apt-get install -y helm 47 | 48 | # install helm repos for rancher 49 | RUN su vscode -c "helm repo add rancher-stable https://releases.rancher.com/server-charts/stable" 50 | RUN su vscode -c "helm repo add jetstack https://charts.jetstack.io" 51 | RUN su vscode -c "helm repo update" 52 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/galaxy.yml: -------------------------------------------------------------------------------- 1 | ### REQUIRED 2 | # The namespace of the collection. This can be a company/brand/organization or product namespace under which all 3 | # content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with 4 | # underscores or numbers and cannot contain consecutive underscores 5 | namespace: sysengquick 6 | 7 | # The name of the collection. Has the same character restrictions as 'namespace' 8 | name: k3s 9 | 10 | # The version of the collection. Must be compatible with semantic versioning 11 | version: 1.0.2 12 | 13 | # The path to the Markdown (.md) readme file. This path is relative to the root of the collection 14 | readme: README.md 15 | 16 | # A list of the collection's content authors. 
Can be just the name or in the format 'Full Name (url) 17 | # @nicks:irc/im.site#channel' 18 | authors: 19 | - John Ratliff 20 | 21 | ### OPTIONAL but strongly recommended 22 | # A short summary description of the collection 23 | description: Ansible collection for building k3s lab 24 | 25 | # Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only 26 | # accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file' 27 | license: 28 | - Apache-2.0 29 | 30 | # The path to the license file for the collection. This path is relative to the root of the collection. This key is 31 | # mutually exclusive with 'license' 32 | # license_file: "" 33 | 34 | # A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character 35 | # requirements as 'namespace' and 'name' 36 | tags: [linux] 37 | 38 | # Collections that this collection requires to be installed for it to be usable. The key of the dict is the 39 | # collection label 'namespace.name'. The value is a version range 40 | # L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version 41 | # range specifiers can be set and are separated by ',' 42 | dependencies: { community.general: ">=8.1.0,<9.0.0" } 43 | 44 | # The URL of the originating SCM repository 45 | repository: http://github.com/sysengquick/k3s 46 | 47 | # The URL to any online docs 48 | documentation: http://github.com/sysengquick/k3s 49 | 50 | # The URL to the homepage of the collection/project 51 | homepage: http://github.com/sysengquick/k3s 52 | 53 | # The URL to the collection issue tracker 54 | issues: http://github.com/sysengquick/k3s/issues 55 | 56 | # A list of file glob-like patterns used to filter any files or directories that should not be included in the build 57 | # artifact. 
A pattern is matched from the relative path of the file or directory of the collection directory. This 58 | # uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry', 59 | # and '.git' are always filtered. Mutually exclusive with 'manifest' 60 | build_ignore: [] 61 | # A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a 62 | # list of MANIFEST.in style 63 | # L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key 64 | # 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive 65 | # with 'build_ignore' 66 | # manifest: null 67 | -------------------------------------------------------------------------------- /collections/sysengquick/k3s/roles/proxmox/tasks/create_template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test sudo access 3 | ansible.builtin.command: echo 'Hello' 4 | become: true 5 | changed_when: false 6 | when: proxmox_use_become 7 | 8 | - name: Create template VM 9 | community.general.proxmox_kvm: 10 | api_host: "{{ proxmox_api.host }}" 11 | api_user: "{{ proxmox_api.user }}" 12 | api_token_id: "{{ proxmox_api.token_id }}" 13 | api_token_secret: "{{ proxmox_api.token_secret }}" 14 | name: "{{ proxmox_template.name }}" 15 | agent: enabled=1 16 | balloon: 0 17 | boot: order=scsi0 18 | cipassword: "{{ proxmox_template.cipassword }}" 19 | ciuser: "{{ proxmox_template.ciuser }}" 20 | cores: "{{ proxmox_template.cores }}" 21 | cpu: host 22 | ide: 23 | ide2: "{{ proxmox_template.storage }}:cloudinit,format={{ proxmox_disk_format }}" 24 | ipconfig: 25 | ipconfig0: "ip=dhcp" 26 | memory: "{{ proxmox_template.memory }}" 27 | net: 28 | net0: "model=virtio,bridge={{ proxmox_template.bridge }},firewall=0,mtu=1" 29 | node: "{{ proxmox_node }}" 30 | ostype: l26 31 | scsihw: 
virtio-scsi-single 32 | serial: 33 | serial0: socket 34 | sshkeys: "{{ proxmox_template.sshkeys }}" 35 | vga: serial0 36 | vmid: "{{ proxmox_template.vmid }}" 37 | delegate_to: localhost 38 | 39 | - name: Import cloud image to template 40 | block: 41 | - name: Download cloud image 42 | ansible.builtin.get_url: 43 | checksum: "{{ proxmox_image.method }}:{{ proxmox_image_url.digest }}" 44 | dest: "{{ proxmox_image_dest }}" 45 | mode: "0644" 46 | url: "{{ proxmox_image_url.image }}" 47 | 48 | # Check if the disk is already attached 49 | # This is needed later to keep the playbook idempotent 50 | - name: Get disk info 51 | community.general.proxmox_disk: 52 | api_host: "{{ proxmox_api.host }}" 53 | api_user: "{{ proxmox_api.user }}" 54 | api_token_id: "{{ proxmox_api.token_id }}" 55 | api_token_secret: "{{ proxmox_api.token_secret }}" 56 | create: disabled 57 | disk: scsi0 58 | vmid: "{{ proxmox_template.vmid }}" 59 | register: result 60 | failed_when: false 61 | delegate_to: localhost 62 | 63 | - name: Register template_attach_disk 64 | ansible.builtin.set_fact: 65 | template_attach_disk: "{{ result.msg | ansible.builtin.regex_search('^Disk scsi0 not found') is not none }}" 66 | 67 | # No need to do anything if the disk is already attached 68 | - name: Create disk from cloud image 69 | when: template_attach_disk 70 | block: 71 | # 72 | # Problem: API requires root to create disk images from files 73 | # Solution: Login to the server and run qm manually 74 | # 75 | # Alternative: Use root and password (API token on root doesn't work) 76 | # But I don't like using account passwords for APIs 77 | # 78 | # Problem 2: Running qm isn't idempotent 79 | # Solution: Check if disk has been created and only run qm if it isn't 80 | # 81 | - name: Get disk info 82 | community.general.proxmox_disk: 83 | api_host: "{{ proxmox_api.host }}" 84 | api_user: "{{ proxmox_api.user }}" 85 | api_token_id: "{{ proxmox_api.token_id }}" 86 | api_token_secret: "{{ proxmox_api.token_secret }}" 87 
| create: disabled 88 | disk: unused0 89 | vmid: "{{ proxmox_template.vmid }}" 90 | register: result 91 | failed_when: false 92 | delegate_to: localhost 93 | 94 | - name: Register disk status 95 | ansible.builtin.set_fact: 96 | template_disk_created: "{{ result.msg | ansible.builtin.regex_search('^Disk unused0 not found') is none }}" 97 | 98 | - name: Branch based on become method 99 | when: not template_disk_created 100 | block: 101 | # 102 | # Use sudo directly in the command 103 | # 104 | # Benefits: Allows limited sudo privileges for the user 105 | # Only qm disk import needs to be added to sudo config 106 | # 107 | # Drawbacks: We cannot prompt for a sudo password here 108 | # The command must have passwordless sudo 109 | # 110 | - name: Create disk from cloud image 111 | ansible.builtin.command: 112 | cmd: "sudo -n {{ proxmox_qm_disk_import }}" 113 | changed_when: true 114 | when: not proxmox_use_become 115 | 116 | # 117 | # Use become plugin 118 | # 119 | # Benefits: Supply become password with --ask-become-pass 120 | # Also works if user has passwordless sudo access 121 | # 122 | # Drawbacks: The user needs full sudo privileges 123 | # The become plugin does more than just run this command 124 | # 125 | - name: Create disk from cloud image 126 | ansible.builtin.command: 127 | cmd: "{{ proxmox_qm_disk_import }}" 128 | become: true 129 | changed_when: true 130 | when: proxmox_use_become 131 | 132 | - name: Attach disk 133 | community.general.proxmox_kvm: 134 | api_host: "{{ proxmox_api.host }}" 135 | api_user: "{{ proxmox_api.user }}" 136 | api_token_id: "{{ proxmox_api.token_id }}" 137 | api_token_secret: "{{ proxmox_api.token_secret }}" 138 | node: "{{ proxmox_node }}" 139 | scsi: 140 | scsi0: "{{ proxmox_image_location }}" 141 | vmid: "{{ proxmox_template.vmid }}" 142 | update: true 143 | update_unsafe: true 144 | delegate_to: localhost 145 | always: 146 | # block (Import cloud image to template) 147 | - name: Remove cloud image download 148 
ansible.builtin.file: 149 | path: "{{ proxmox_image_dest }}" 150 | state: absent 151 | when: proxmox_image_delete 152 | 153 | - name: Ensure disk size 154 | community.general.proxmox_disk: 155 | api_host: "{{ proxmox_api.host }}" 156 | api_user: "{{ proxmox_api.user }}" 157 | api_token_id: "{{ proxmox_api.token_id }}" 158 | api_token_secret: "{{ proxmox_api.token_secret }}" 159 | disk: scsi0 160 | size: "{{ proxmox_template.size }}" 161 | state: resized 162 | vmid: "{{ proxmox_template.vmid }}" 163 | delegate_to: localhost 164 | 165 | - name: Update disk properties 166 | community.general.proxmox_disk: 167 | api_host: "{{ proxmox_api.host }}" 168 | api_user: "{{ proxmox_api.user }}" 169 | api_token_id: "{{ proxmox_api.token_id }}" 170 | api_token_secret: "{{ proxmox_api.token_secret }}" 171 | discard: "on" 172 | disk: scsi0 173 | iothread: true 174 | ssd: true 175 | vmid: "{{ proxmox_template.vmid }}" 176 | when: proxmox_template.ssd 177 | delegate_to: localhost 178 | 179 | - name: Make the VM a template 180 | community.general.proxmox_kvm: 181 | api_host: "{{ proxmox_api.host }}" 182 | api_user: "{{ proxmox_api.user }}" 183 | api_token_id: "{{ proxmox_api.token_id }}" 184 | api_token_secret: "{{ proxmox_api.token_secret }}" 185 | node: "{{ proxmox_node }}" 186 | template: true 187 | vmid: "{{ proxmox_template.vmid }}" 188 | update: true 189 | delegate_to: localhost 190 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SysEng Quick Kubernetes Ansible Repo 2 | 3 | - [SysEng Quick Kubernetes Ansible Repo](#syseng-quick-kubernetes-ansible-repo) 4 | - [Basic Information](#basic-information) 5 | - [YouTube Series](#youtube-series) 6 | - [Components](#components) 7 | - [Requirements](#requirements) 8 | - [What if I don't want to use devcontainers?](#what-if-i-dont-want-to-use-devcontainers) 9 | - [Using Docker Directly](#using-docker-directly) 10 | - [Installing Dependencies Locally](#installing-dependencies-locally) 11 | - [Python Packages](#python-packages) 12 | - [Ansible Collections](#ansible-collections) 13 | - [k3sup](#k3sup) 14 | - [kubectl](#kubectl) 15 | - [helm](#helm) 16 | - [Instructions](#instructions) 17 | - [Configuring the Cluster](#configuring-the-cluster) 18 | - [Regenerating the Inventory](#regenerating-the-inventory) 19 | - [Deploying VMs on Proxmox](#deploying-vms-on-proxmox) 20 | - [Adding Ansible Secret Values](#adding-ansible-secret-values) 21 
| - [Configure the Proxmox Role Defaults](#configure-the-proxmox-role-defaults) 22 | - [proxmox\_api](#proxmox_api) 23 | - [proxmox\_image\_delete](#proxmox_image_delete) 24 | - [promox\_image\_dest](#promox_image_dest) 25 | - [proxmox\_node](#proxmox_node) 26 | - [proxmox\_template](#proxmox_template) 27 | - [proxmox\_timeouts](#proxmox_timeouts) 28 | - [proxmox\_use\_become](#proxmox_use_become) 29 | - [Configuring the Cloud Images](#configuring-the-cloud-images) 30 | - [create\_cluster playbook](#create_cluster-playbook) 31 | - [remove\_cluster playbook](#remove_cluster-playbook) 32 | - [Deploying k3s on Your VMs](#deploying-k3s-on-your-vms) 33 | - [Configure the k3sup Role Defaults](#configure-the-k3sup-role-defaults) 34 | - [k3sup\_context](#k3sup_context) 35 | - [k3sup\_debug](#k3sup_debug) 36 | - [k3sup\_extra\_args](#k3sup_extra_args) 37 | - [k3sup\_iface](#k3sup_iface) 38 | - [k3sup\_ip\_range](#k3sup_ip_range) 39 | - [k3sup\_k3s\_version](#k3sup_k3s_version) 40 | - [k3sup\_k3s\_server\_noschedule](#k3sup_k3s_server_noschedule) 41 | - [k3sup\_kube\_vip\_lease\_XXX](#k3sup_kube_vip_lease_xxx) 42 | - [k3sup\_kube\_vip\_version](#k3sup_kube_vip_version) 43 | - [k3sup\_local\_path](#k3sup_local_path) 44 | - [k3sup\_prefer\_ip](#k3sup_prefer_ip) 45 | - [k3sup\_ssh\_key](#k3sup_ssh_key) 46 | - [k3sup\_use\_kube\_vip / k3sup\_use\_kube\_vip\_cloud\_controller](#k3sup_use_kube_vip--k3sup_use_kube_vip_cloud_controller) 47 | - [k3sup\_vip](#k3sup_vip) 48 | - [deploy\_k3s playbook](#deploy_k3s-playbook) 49 | - [Deploying Rancher to Your Cluster](#deploying-rancher-to-your-cluster) 50 | - [Configure the rancher Role Defaults](#configure-the-rancher-role-defaults) 51 | - [rancher\_bootstrap\_password](#rancher_bootstrap_password) 52 | - [rancher\_cert\_manager\_version](#rancher_cert_manager_version) 53 | - [rancher\_debug](#rancher_debug) 54 | - [rancher\_hostname](#rancher_hostname) 55 | - [rancher\_lb\_service\_enable](#rancher_lb_service_enable) 56 | - 
[rancher\_lb\_service\_name](#rancher_lb_service_name) 57 | - [rancher\_replicas](#rancher_replicas) 58 | - [rancher\_version](#rancher_version) 59 | - [deploy\_rancher playbook](#deploy_rancher-playbook) 60 | - [Additional Resources](#additional-resources) 61 | 62 | ## Basic Information 63 | 64 | This repo contains ansible playbooks to help you bootstrap a kubernetes cluster. 65 | If you have proxmox, it can even build the VMs for the cluster. 66 | 67 | ## YouTube Series 68 | 69 | I demonstrate the use of this repo in a playlist on my YouTube channel. 70 | 71 | [Kubernetes with Ansible Playlist](https://youtube.com/playlist?list=PLvadQtO-ihXvO-SoG5YQ1LfmQcTLv2fre&si=62lOhrioFZW-VW0Z) 72 | 73 | ## Components 74 | 75 | | Component | Description | 76 | | ------------------------- | -------------------------------------------------------------------- | 77 | | k3sup | Used to install k3s on the nodes | 78 | | k3s | The kubernetes implementation | 79 | | kube-vip | Manages a virtual IP (VIP) on the control plane nodes for k8s API HA | 80 | | kube-vip cloud controller | Replacement LoadBalancer controller for k3s built-in servicelb | 81 | | traefik | Ingress controller for the cluster | 82 | | rancher | Cluster web GUI | 83 | 84 | ## Requirements 85 | 86 | - At least one VM 87 | - k3s needs at least one node to start 88 | - At least 3 servers are required for HA 89 | - At least 2 workers are required for HA if you disable workloads on the servers 90 | - lxc containers might work, but probably not as these plays exist today 91 | - Devcontainers (and Docker/Podman) 92 | - Not strictly required, but the requirements are bundled making it easier 93 | - The ability to edit YAML files 94 | - The defaults will almost certainly need **some** tweaking 95 | 96 | ### What if I don't want to use devcontainers? 97 | 98 | The bundled devcontainer will make it much easier to use these plays. 99 | However, I understand this may not be for everyone. 
100 | 101 | #### Using Docker Directly 102 | 103 | If you have docker, but don't want to use devcontainers, that's an option. 104 | 105 | ```sh 106 | docker pull ghcr.io/sysengquick/k3s:latest 107 | docker run --rm -it -v ".:/workspace" --name sysengquick-k3s ghcr.io/sysengquick/k3s:latest /bin/bash -c 'cd /workspace && sudo -u vscode bash' 108 | ``` 109 | 110 | You can build the image from the Dockerfile locally if you prefer. 111 | 112 | ```sh 113 | docker build -t ghcr.io/sysengquick/k3s:latest . 114 | docker run --rm -it -v ".:/workspace" --name sysengquick-k3s ghcr.io/sysengquick/k3s:latest /bin/bash -c 'cd /workspace && sudo -u vscode bash' 115 | ``` 116 | 117 | #### Installing Dependencies Locally 118 | 119 | If you don't want to use the container, you'll need to install the dependencies. 120 | 121 | Ansible only runs on Linux and macOS. 122 | If you're using Windows, you must use WSL. 123 | 124 | The Dockerfile is a pretty good template for what you need. 125 | Parts of it are just for convenience. 126 | 127 | ##### Python Packages 128 | 129 | I use poetry to manage python dependencies, but it's not required. 130 | 131 | I've only tested this with python 3.12 and ansible-core 2.16. 132 | Older versions might work, but have not been tested. 133 | 134 | Once python and pip are installed, you can install the required packages. 135 | 136 | ```sh 137 | python3 -m pip install ansible-core pyyaml 138 | ``` 139 | 140 | If you are using the proxmox plays, you will need two additional packages. 141 | 142 | ```sh 143 | python3 -m pip install proxmoxer requests 144 | ``` 145 | 146 | ##### Ansible Collections 147 | 148 | Now that ansible is installed, you need to install the required collections. 149 | 150 | ```sh 151 | ansible-galaxy collection install -r collections/requirements.yml 152 | ``` 153 | 154 | Ansible looks for collections in specific places. 155 | We can add a symlink so ansible can find our sysengquick.k3s collection. 
156 | 157 | ```sh 158 | ln -s collections/sysengquick ~/.ansible/collections/ansible_collections/ 159 | ``` 160 | 161 | ##### k3sup 162 | 163 | We are using k3sup to install k3s, so we'll need that. 164 | 165 | ```sh 166 | export K3SUP_VERSION=0.13.5 167 | sudo curl -sLSo /usr/local/bin/k3sup \ 168 | https://github.com/alexellis/k3sup/releases/download/${K3SUP_VERSION}/k3sup 169 | sudo chmod 755 /usr/local/bin/k3sup 170 | ``` 171 | 172 | ##### kubectl 173 | 174 | We'll need kubectl to interact with our cluster. 175 | 176 | ```sh 177 | export KUBECTL_VERSION=v1.27.12 178 | sudo curl -sLSo /usr/local/bin/kubectl \ 179 | https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl 180 | sudo chmod 755 /usr/local/bin/kubectl 181 | ``` 182 | 183 | ##### helm 184 | 185 | We need helm to install cert-manager and rancher. 186 | 187 | ```sh 188 | export HELM_VERSION=v3.14.2 189 | pushd /tmp 190 | curl -sLS https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar xz 191 | sudo mv linux-amd64/helm /usr/local/bin 192 | rm -r linux-amd64 193 | popd 194 | ``` 195 | 196 | After helm has been installed, you need to add some helm repos. 197 | 198 | ```sh 199 | helm repo add rancher-stable https://releases.rancher.com/server-charts/stable 200 | helm repo add jetstack https://charts.jetstack.io 201 | helm repo update 202 | ``` 203 | 204 | ## Instructions 205 | 206 | Make sure you are using the devcontainer or have the requirements installed. 207 | 208 | ### Configuring the Cluster 209 | 210 | The playbooks need to know how the cluster should be configured. 211 | Open the file _collections/sysengquick/k3s/playbooks/group_vars/all/cluster.yml_. 212 | 213 | Update the cluster nodes with the names and IPs of your server and worker nodes. 214 | You need at least one server, and three for a true HA setup. 215 | Workers are only required if you prevent workloads from being scheduled on the servers. 216 | This is a best practice, but is not required. 
217 | 218 | Update the node_ssh_user with the privileged user account on your nodes. 219 | This user must have ssh key authentication configured. 220 | It must be root or have full passwordless sudo privileges as root. 221 | 222 | If you are using the proxmox playbooks, cloud-init will set everything up for you. 223 | 224 | **NOTE**: The remaining keys are not important if you are not using proxmox. 225 | 226 | In proxmox_network, set gateway and cidr as appropriate for your cluster network. 227 | 228 | Set proxmox_server to the IP or DNS name for your proxmox API server. 229 | 230 | Set proxmox_ssh_user to be a user on your proxmox server that can run `qm disk import` as root. 231 | If you want to allow-list only this single command, it must be added with NOPASSWD. 232 | Otherwise, the user must have full sudo privileges as root. 233 | 234 | Either one of these entries should be sufficient. 235 | 236 | ```sudoers 237 | ansible ALL=(root) NOPASSWD: /usr/sbin/qm 238 | ansible ALL=(root) ALL 239 | ``` 240 | 241 | #### Regenerating the Inventory 242 | 243 | After updating the cluster configuration, you must rebuild inventory.yaml. 244 | There is a helper script to do this. 245 | 246 | ```sh 247 | python3 scripts/update_inventory.py 248 | ``` 249 | 250 | ### Deploying VMs on Proxmox 251 | 252 | This repo contains plays for deploying VMs from cloud images to a proxmox cluster. 253 | 254 | An API token capable of creating templates and VMs in proxmox is required. 255 | I applied the PVEAdmin role to my token on the root resource. 256 | 257 | #### Adding Ansible Secret Values 258 | 259 | The proxmox playbooks require two secret values: the cloud-init user password and the proxmox API token secret. 260 | 261 | The cloud-init user password can be anything you want. 262 | It is only used for serial console logins in case SSH is not working. 263 | 264 | Open vault.example.yml in _collections/sysengquick/k3s/playbooks/vars/proxmox_. 
265 | Copy the contents of this file over the contents of vault.yml. 266 | Update the values as appropriate. 267 | 268 | In ansible.cfg, there is a vault password file. 269 | Write your vault password to a file and update the path as needed. 270 | Encrypt the vault.yml file with ansible-vault. 271 | 272 | ```sh 273 | ansible-vault encrypt --vault-pass-file /private/ansible/vault/sysengquick/k3s vault.yml 274 | ``` 275 | 276 | Replace the path in the command as appropriate. 277 | 278 | #### Configure the Proxmox Role Defaults 279 | 280 | Several options may need changed in the proxmox role defaults. 281 | Open the file main.yml in _collections/sysengquick/k3s/roles/proxmox/defaults_. 282 | 283 | ##### proxmox_api 284 | 285 | In the proxmox_api dictionary, you may need to change the token_id or user to match your token. 286 | 287 | ##### proxmox_image_delete 288 | 289 | Change this to true if you want ansible to delete the cloud image file after it creates the template. 290 | 291 | ##### promox_image_dest 292 | 293 | This is where the cloud image disk file is saved. 294 | 295 | ##### proxmox_node 296 | 297 | This field must match one of the nodes in your proxmox cluster. 298 | 299 | **NOTE**: I don't have a multi-node cluster. 300 | If you want to place VMs on different nodes, migrate them after creation. 301 | 302 | ##### proxmox_template 303 | 304 | This dictionary contains base settings the VMs will inherit. 305 | You can alter the settings after creation if you want to tweak them individually. 306 | 307 | | Template Property | Description | 308 | | ----------------- | --------------------------------------------------------------------------------------------- | 309 | | block_storage | False for disk files (e.g. qcow2/vmdk). True for block storage devices (e.g. 
lvm/zfs volumes) | 310 | | bridge | Proxmox bridge device to attach your VM NICs | 311 | | ciusr | cloud-init username | 312 | | cipassword | cloud-init password | 313 | | cores | CPU cores | 314 | | disk_format | disk file format (e.g. qcow2/raw/vmdk) -- ignored when block_storage is true | 315 | | image | cloud image to build template from (see vars/main.yml for options) | 316 | | memory | RAM in mebibytes (i.e. powers of 2, not 10) | 317 | | name | Template name | 318 | | size | Disk size | 319 | | ssd | True if your storage pool is on SSD | 320 | | sshkeys | SSH authorized keys to add to the cloud-init user | 321 | | storage | Proxmox storage pool to place the VM/template disks | 322 | | vmid | The ID to use for the template | 323 | 324 | ##### proxmox_timeouts 325 | 326 | The proxmox_timeouts dictionary is how long (in seconds) to wait for certain actions. 327 | 328 | | Timeout | Description | 329 | | -------- | ------------------------------------------------------ | 330 | | api | Timeout for creating a VM from the template | 331 | | creation | Timeout for between VM creation before continuing | 332 | | startup | Timeout for SSH login to work on the newly created VMs | 333 | 334 | ##### proxmox_use_become 335 | 336 | proxmox_use_become should be set true if your user has full sudo privileges. 337 | If you just added the qm disk import command without a password, leave it false. 338 | 339 | #### Configuring the Cloud Images 340 | 341 | In the proxmox role vars, proxmox_images defines the usable cloud images. 342 | There are four images defined currently: alma_linux_9, debian_12, ubuntu_2204, and ubuntu_2204_minimal. 343 | 344 | I have done the most testing with debian_12 (aka bookworm), but any of these should work. 345 | 346 | If you want to add another cloud image, you must fill out a new entry in this dictionary. 347 | 348 | I was not able to import Oracle Linux 8 or 9 cloud images. 349 | Proxmox didn't seem to understand the disk format it was using. 
350 | I'd love a pull request if you can make this work. 351 | 352 | | Image Property | Description | 353 | | -------------- | ---------------------------------------------------------- | 354 | | base | Base URL to download the cloud image disk and digest files | 355 | | name | Name of the disk image file in base | 356 | | digest | Name of the checksum file in base | 357 | | method | Hash algorithm in the digest file (e.g. sha256, sha512) | 358 | 359 | You should be able to concatenate the base property with the name or digest to get a full URL to the file. 360 | The base property must end with a trailing slash. 361 | 362 | #### create_cluster playbook 363 | 364 | When you are done tweaking your settings, run the create_cluster playbook. 365 | 366 | ```sh 367 | ansible-playbook sysengquick.k3s.create_cluster -K 368 | ``` 369 | 370 | If you don't need a sudo password, you can press enter when prompted or leave off the -K option. 371 | 372 | #### remove_cluster playbook 373 | 374 | If you want to tear down your cluster, run the remove_cluster playbook. 375 | 376 | ```sh 377 | ansible-playbook sysengquick.k3s.remove_cluster 378 | ``` 379 | 380 | ### Deploying k3s on Your VMs 381 | 382 | Once you have your VMs, you're ready to install k3s. 383 | 384 | **NOTE**: You do not need proxmox for this part. 385 | Any VMs capable of running k3s should work. 386 | 387 | #### Configure the k3sup Role Defaults 388 | 389 | There are some values that may need to be updated in the k3sup role defaults. 390 | Open `main.yml` in _collections/sysengquick/k3s/roles/k3sup/defaults_. 391 | 392 | ##### k3sup_context 393 | 394 | This is the kubeconfig context. 395 | It's largely cosmetic. 396 | 397 | ##### k3sup_debug 398 | 399 | Set true to get debug output printed about the console commands run by the playbook. 400 | 401 | ##### k3sup_extra_args 402 | 403 | Array of additional arguments to pass to k3s. 404 | 405 | ##### k3sup_iface 406 | 407 | The default interface on the VMs. 
408 | 409 | This is needed for a few things in the playbooks. 410 | There is no provision for handling VMs with different default interfaces. 411 | 412 | ##### k3sup_ip_range 413 | 414 | Range of IPs that kube-vip cloud controller will assign to service loadbalancers. 415 | 416 | For a single IP, make start and end the same. 417 | This is untested, but it should work. 418 | 419 | **NOTE**: This value is not used if kube-vip cloud controller is not used. 420 | 421 | ##### k3sup_k3s_version 422 | 423 | The version of k3s to install. 424 | To install rancher, make sure you pick a compatible version. 425 | 426 | ##### k3sup_k3s_server_noschedule 427 | 428 | Set this to true to prevent scheduling workloads on control planes. 429 | 430 | It adds the node taint node-role.kubernetes.io/control-plane:NoSchedule to the cluster. 431 | 432 | ##### k3sup_kube_vip_lease_XXX 433 | 434 | These properties control the timeouts on the kube-vip virtual IP leadership elections. 435 | You probably don't need to change these. 436 | 437 | ##### k3sup_kube_vip_version 438 | 439 | Which version of kube-vip to install. 440 | 441 | ##### k3sup_local_path 442 | 443 | Where to install the kubectl configuration file on the local host. 444 | 445 | ##### k3sup_prefer_ip 446 | 447 | When true, k3sup will connect to the nodes by IP and not hostname. 448 | 449 | ##### k3sup_ssh_key 450 | 451 | Path to the SSH key k3sup will use to connect to the nodes. 452 | 453 | ##### k3sup_use_kube_vip / k3sup_use_kube_vip_cloud_controller 454 | 455 | When true, these components will be installed on your k3s cluster. 456 | Any combination should work (both, neither, or only one). 457 | 458 | ##### k3sup_vip 459 | 460 | The virtual IP to share with kube-vip. 461 | 462 | **NOTE**: This property is unused if kube-vip is not installed. 463 | 464 | #### deploy_k3s playbook 465 | 466 | Once your values have been set, run the deploy_k3s playbook. 
467 | 468 | ```sh 469 | ansible-playbook sysengquick.k3s.deploy_k3s 470 | ``` 471 | 472 | ### Deploying Rancher to Your Cluster 473 | 474 | Once your cluster is up, the final step is to deploy rancher. 475 | 476 | #### Configure the rancher Role Defaults 477 | 478 | Take a look at the rancher role defaults. 479 | Open `main.yml` in _collections/sysengquick/k3s/roles/rancher/defaults_. 480 | 481 | ##### rancher_bootstrap_password 482 | 483 | This is the password used to connect to rancher the first time. 484 | You should change this after installation, so the bootstrap password isn't really important. 485 | 486 | ##### rancher_cert_manager_version 487 | 488 | This is the version of cert-manager to install. 489 | 490 | ##### rancher_debug 491 | 492 | Similar to k3sup_debug. 493 | Displays command module output after commands run by the playbooks. 494 | 495 | ##### rancher_hostname 496 | 497 | This is the default hostname of your rancher UI. 498 | You can change this later, but it might be easier to change it here. 499 | 500 | ##### rancher_lb_service_enable 501 | 502 | Set to true if you want to use a service loadbalancer. 503 | This might be useful if you disable the default traefik in k3s. 504 | 505 | ##### rancher_lb_service_name 506 | 507 | This is the name of the service loadbalancer if enabled. 508 | 509 | ##### rancher_replicas 510 | 511 | This is how many rancher replicas to run. 512 | 3 is a good default for an HA deployment. 513 | 514 | ##### rancher_version 515 | 516 | This is the version of rancher to install. 517 | Ensure you pick a version compatible with your selected k3s version. 518 | 519 | #### deploy_rancher playbook 520 | 521 | After you've gone through the defaults, run the deploy_rancher playbook. 522 | 523 | ```sh 524 | ansible-playbook sysengquick.k3s.deploy_rancher 525 | ``` 526 | 527 | ## Additional Resources 528 | 529 | Check out the YouTube playlist in the YouTube Series link at the top. 
530 | 531 | If you need help or something isn't working, file an issue on the github repo. 532 | -------------------------------------------------------------------------------- /logs/turnup.log: -------------------------------------------------------------------------------- 1 | + ansible-playbook sysengquick.k3s.create_cluster 2 | 3 | PLAY [Create proxmox template] ************************************************* 4 | 5 | TASK [Create proxmox template] ************************************************* 6 | 7 | TASK [sysengquick.k3s.proxmox : Test sudo access] ****************************** 8 | skipping: [pve.local.technoplaza.net] 9 | 10 | TASK [sysengquick.k3s.proxmox : Create template VM] **************************** 11 | changed: [pve.local.technoplaza.net -> localhost] 12 | 13 | TASK [sysengquick.k3s.proxmox : Download cloud image] ************************** 14 | changed: [pve.local.technoplaza.net] 15 | 16 | TASK [sysengquick.k3s.proxmox : Get disk imfo] ********************************* 17 | ok: [pve.local.technoplaza.net -> localhost] 18 | 19 | TASK [sysengquick.k3s.proxmox : Register template_attach_disk] ***************** 20 | ok: [pve.local.technoplaza.net] 21 | 22 | TASK [sysengquick.k3s.proxmox : Get disk info] ********************************* 23 | ok: [pve.local.technoplaza.net -> localhost] 24 | 25 | TASK [sysengquick.k3s.proxmox : Register disk status] ************************** 26 | ok: [pve.local.technoplaza.net] 27 | 28 | TASK [sysengquick.k3s.proxmox : Create disk from cloud image] ****************** 29 | changed: [pve.local.technoplaza.net] 30 | 31 | TASK [sysengquick.k3s.proxmox : Create disk from cloud image] ****************** 32 | skipping: [pve.local.technoplaza.net] 33 | 34 | TASK [sysengquick.k3s.proxmox : Attach disk] *********************************** 35 | changed: [pve.local.technoplaza.net -> localhost] 36 | 37 | TASK [sysengquick.k3s.proxmox : Remove cloud image download] ******************* 38 | skipping: 
[pve.local.technoplaza.net] 39 | 40 | TASK [sysengquick.k3s.proxmox : Ensure disk size] ****************************** 41 | changed: [pve.local.technoplaza.net -> localhost] 42 | 43 | TASK [sysengquick.k3s.proxmox : Update disk properties] ************************ 44 | changed: [pve.local.technoplaza.net -> localhost] 45 | 46 | TASK [sysengquick.k3s.proxmox : Make the VM a template] ************************ 47 | changed: [pve.local.technoplaza.net -> localhost] 48 | 49 | PLAY [Create proxmox nodes] **************************************************** 50 | 51 | TASK [Create proxmox nodes] **************************************************** 52 | 53 | TASK [sysengquick.k3s.proxmox : Run tasks on all k3s nodes] ******************** 54 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/create_node.yml for localhost => (item={'name': 's1.k3s.local.technoplaza.net', 'ip': '192.168.1.51'}) 55 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/create_node.yml for localhost => (item={'name': 's2.k3s.local.technoplaza.net', 'ip': '192.168.1.52'}) 56 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/create_node.yml for localhost => (item={'name': 's3.k3s.local.technoplaza.net', 'ip': '192.168.1.53'}) 57 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/create_node.yml for localhost => (item={'name': 'w1.k3s.local.technoplaza.net', 'ip': '192.168.1.54'}) 58 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/create_node.yml for localhost => (item={'name': 'w2.k3s.local.technoplaza.net', 'ip': '192.168.1.55'}) 59 | 60 | TASK [sysengquick.k3s.proxmox : Create k3s node from template] ***************** 61 | changed: [localhost] 62 | 63 | TASK [sysengquick.k3s.proxmox : Register vmid] ********************************* 64 | ok: 
[localhost] 65 | 66 | TASK [sysengquick.k3s.proxmox : Update IP configuration] *********************** 67 | changed: [localhost] 68 | 69 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 70 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/start_node.yml for localhost 71 | 72 | TASK [sysengquick.k3s.proxmox : Assert variable definitions] ******************* 73 | ok: [localhost] => { 74 | "changed": false, 75 | "msg": "All assertions passed" 76 | } 77 | 78 | TASK [sysengquick.k3s.proxmox : Pause for proxmox] ***************************** 79 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/pause_for_proxmox.yml for localhost 80 | 81 | TASK [sysengquick.k3s.proxmox : Pause for proxmox to catch up] ***************** 82 | Pausing for 15 seconds 83 | (ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort) 84 | ok: [localhost] 85 | 86 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 87 | changed: [localhost] 88 | 89 | TASK [sysengquick.k3s.proxmox : Create k3s node from template] ***************** 90 | changed: [localhost] 91 | 92 | TASK [sysengquick.k3s.proxmox : Register vmid] ********************************* 93 | ok: [localhost] 94 | 95 | TASK [sysengquick.k3s.proxmox : Update IP configuration] *********************** 96 | changed: [localhost] 97 | 98 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 99 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/start_node.yml for localhost 100 | 101 | TASK [sysengquick.k3s.proxmox : Assert variable definitions] ******************* 102 | ok: [localhost] => { 103 | "changed": false, 104 | "msg": "All assertions passed" 105 | } 106 | 107 | TASK [sysengquick.k3s.proxmox : Pause for proxmox] ***************************** 108 | included: 
/home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/pause_for_proxmox.yml for localhost 109 | 110 | TASK [sysengquick.k3s.proxmox : Pause for proxmox to catch up] ***************** 111 | Pausing for 15 seconds 112 | (ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort) 113 | ok: [localhost] 114 | 115 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 116 | changed: [localhost] 117 | 118 | TASK [sysengquick.k3s.proxmox : Create k3s node from template] ***************** 119 | changed: [localhost] 120 | 121 | TASK [sysengquick.k3s.proxmox : Register vmid] ********************************* 122 | ok: [localhost] 123 | 124 | TASK [sysengquick.k3s.proxmox : Update IP configuration] *********************** 125 | changed: [localhost] 126 | 127 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 128 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/start_node.yml for localhost 129 | 130 | TASK [sysengquick.k3s.proxmox : Assert variable definitions] ******************* 131 | ok: [localhost] => { 132 | "changed": false, 133 | "msg": "All assertions passed" 134 | } 135 | 136 | TASK [sysengquick.k3s.proxmox : Pause for proxmox] ***************************** 137 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/pause_for_proxmox.yml for localhost 138 | 139 | TASK [sysengquick.k3s.proxmox : Pause for proxmox to catch up] ***************** 140 | Pausing for 15 seconds 141 | (ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort) 142 | ok: [localhost] 143 | 144 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 145 | changed: [localhost] 146 | 147 | TASK [sysengquick.k3s.proxmox : Create k3s node from template] ***************** 148 | changed: [localhost] 149 | 150 | TASK [sysengquick.k3s.proxmox : Register vmid] 
********************************* 151 | ok: [localhost] 152 | 153 | TASK [sysengquick.k3s.proxmox : Update IP configuration] *********************** 154 | changed: [localhost] 155 | 156 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 157 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/start_node.yml for localhost 158 | 159 | TASK [sysengquick.k3s.proxmox : Assert variable definitions] ******************* 160 | ok: [localhost] => { 161 | "changed": false, 162 | "msg": "All assertions passed" 163 | } 164 | 165 | TASK [sysengquick.k3s.proxmox : Pause for proxmox] ***************************** 166 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/pause_for_proxmox.yml for localhost 167 | 168 | TASK [sysengquick.k3s.proxmox : Pause for proxmox to catch up] ***************** 169 | Pausing for 15 seconds 170 | (ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort) 171 | ok: [localhost] 172 | 173 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 174 | changed: [localhost] 175 | 176 | TASK [sysengquick.k3s.proxmox : Create k3s node from template] ***************** 177 | changed: [localhost] 178 | 179 | TASK [sysengquick.k3s.proxmox : Register vmid] ********************************* 180 | ok: [localhost] 181 | 182 | TASK [sysengquick.k3s.proxmox : Update IP configuration] *********************** 183 | changed: [localhost] 184 | 185 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 186 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/start_node.yml for localhost 187 | 188 | TASK [sysengquick.k3s.proxmox : Assert variable definitions] ******************* 189 | ok: [localhost] => { 190 | "changed": false, 191 | "msg": "All assertions passed" 192 | } 193 | 194 | TASK [sysengquick.k3s.proxmox : Pause for proxmox] 
***************************** 195 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/pause_for_proxmox.yml for localhost 196 | 197 | TASK [sysengquick.k3s.proxmox : Pause for proxmox to catch up] ***************** 198 | Pausing for 15 seconds 199 | (ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort) 200 | ok: [localhost] 201 | 202 | TASK [sysengquick.k3s.proxmox : Start node] ************************************ 203 | changed: [localhost] 204 | 205 | PLAY [Prep nodes] ************************************************************** 206 | 207 | TASK [Prep nodes] ************************************************************** 208 | 209 | TASK [sysengquick.k3s.proxmox : Wait for server to startup] ******************** 210 | ok: [s2.k3s.local.technoplaza.net] 211 | ok: [w1.k3s.local.technoplaza.net] 212 | ok: [s1.k3s.local.technoplaza.net] 213 | ok: [s3.k3s.local.technoplaza.net] 214 | ok: [w2.k3s.local.technoplaza.net] 215 | 216 | TASK [sysengquick.k3s.proxmox : Install packages] ****************************** 217 | FAILED - RETRYING: [w2.k3s.local.technoplaza.net]: Install packages (3 retries left). 
218 | changed: [s3.k3s.local.technoplaza.net] 219 | changed: [s1.k3s.local.technoplaza.net] 220 | changed: [s2.k3s.local.technoplaza.net] 221 | changed: [w1.k3s.local.technoplaza.net] 222 | changed: [w2.k3s.local.technoplaza.net] 223 | 224 | TASK [sysengquick.k3s.proxmox : Start qemu-guest-agent] ************************ 225 | changed: [w1.k3s.local.technoplaza.net] 226 | changed: [s3.k3s.local.technoplaza.net] 227 | changed: [s1.k3s.local.technoplaza.net] 228 | changed: [s2.k3s.local.technoplaza.net] 229 | changed: [w2.k3s.local.technoplaza.net] 230 | 231 | TASK [sysengquick.k3s.proxmox : Load installed packages] *********************** 232 | ok: [s2.k3s.local.technoplaza.net] 233 | ok: [w1.k3s.local.technoplaza.net] 234 | ok: [s3.k3s.local.technoplaza.net] 235 | ok: [w2.k3s.local.technoplaza.net] 236 | ok: [s1.k3s.local.technoplaza.net] 237 | 238 | TASK [sysengquick.k3s.proxmox : Stop and disable ufw] ************************** 239 | skipping: [s1.k3s.local.technoplaza.net] 240 | skipping: [s2.k3s.local.technoplaza.net] 241 | skipping: [s3.k3s.local.technoplaza.net] 242 | skipping: [w1.k3s.local.technoplaza.net] 243 | skipping: [w2.k3s.local.technoplaza.net] 244 | 245 | PLAY [Snapshot cluster] ******************************************************** 246 | 247 | TASK [Snapshot all k3s nodes] ************************************************** 248 | 249 | TASK [sysengquick.k3s.proxmox : Run tasks on all k3s nodes] ******************** 250 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/snapshot.yml for localhost => (item={'name': 's1.k3s.local.technoplaza.net', 'ip': '192.168.1.51'}) 251 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/snapshot.yml for localhost => (item={'name': 's2.k3s.local.technoplaza.net', 'ip': '192.168.1.52'}) 252 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/snapshot.yml for 
localhost => (item={'name': 's3.k3s.local.technoplaza.net', 'ip': '192.168.1.53'}) 253 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/snapshot.yml for localhost => (item={'name': 'w1.k3s.local.technoplaza.net', 'ip': '192.168.1.54'}) 254 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/proxmox/tasks/snapshot.yml for localhost => (item={'name': 'w2.k3s.local.technoplaza.net', 'ip': '192.168.1.55'}) 255 | 256 | TASK [sysengquick.k3s.proxmox : Check proxmox_snapshot variable] *************** 257 | ok: [localhost] => { 258 | "changed": false, 259 | "msg": "All assertions passed" 260 | } 261 | 262 | TASK [sysengquick.k3s.proxmox : Proxmox snapshot] ****************************** 263 | changed: [localhost] 264 | 265 | TASK [sysengquick.k3s.proxmox : Check proxmox_snapshot variable] *************** 266 | ok: [localhost] => { 267 | "changed": false, 268 | "msg": "All assertions passed" 269 | } 270 | 271 | TASK [sysengquick.k3s.proxmox : Proxmox snapshot] ****************************** 272 | changed: [localhost] 273 | 274 | TASK [sysengquick.k3s.proxmox : Check proxmox_snapshot variable] *************** 275 | ok: [localhost] => { 276 | "changed": false, 277 | "msg": "All assertions passed" 278 | } 279 | 280 | TASK [sysengquick.k3s.proxmox : Proxmox snapshot] ****************************** 281 | changed: [localhost] 282 | 283 | TASK [sysengquick.k3s.proxmox : Check proxmox_snapshot variable] *************** 284 | ok: [localhost] => { 285 | "changed": false, 286 | "msg": "All assertions passed" 287 | } 288 | 289 | TASK [sysengquick.k3s.proxmox : Proxmox snapshot] ****************************** 290 | changed: [localhost] 291 | 292 | TASK [sysengquick.k3s.proxmox : Check proxmox_snapshot variable] *************** 293 | ok: [localhost] => { 294 | "changed": false, 295 | "msg": "All assertions passed" 296 | } 297 | 298 | TASK [sysengquick.k3s.proxmox : Proxmox snapshot] 
****************************** 299 | changed: [localhost] 300 | 301 | PLAY RECAP ********************************************************************* 302 | localhost : ok=60 changed=20 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 303 | pve.local.technoplaza.net : ok=11 changed=7 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 304 | s1.k3s.local.technoplaza.net : ok=4 changed=2 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 305 | s2.k3s.local.technoplaza.net : ok=4 changed=2 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 306 | s3.k3s.local.technoplaza.net : ok=4 changed=2 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 307 | w1.k3s.local.technoplaza.net : ok=4 changed=2 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 308 | w2.k3s.local.technoplaza.net : ok=4 changed=2 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 309 | 310 | 311 | real 2m59.554s 312 | user 0m29.133s 313 | sys 0m6.359s 314 | + ansible-playbook sysengquick.k3s.deploy_k3s 315 | 316 | PLAY [Deploy k3s] ************************************************************** 317 | 318 | TASK [Deploy k3s] ************************************************************** 319 | 320 | TASK [sysengquick.k3s.k3sup : Deploy cluster] ********************************** 321 | changed: [localhost] 322 | 323 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 324 | ok: [localhost] => { 325 | "msg": { 326 | "cmd": [ 327 | "/usr/local/bin/k3sup", 328 | "install", 329 | "--cluster", 330 | "--context", 331 | "k3s-ha", 332 | "--ip", 333 | "192.168.1.51", 334 | "--k3s-extra-args", 335 | "--disable servicelb", 336 | "--k3s-version", 337 | "v1.27.11+k3s1", 338 | "--local-path", 339 | "/home/vscode/.kube/config", 340 | "--ssh-key", 341 | "/home/vscode/.ssh/id_ed25519", 342 | "--tls-san", 343 | "192.168.1.50", 344 | "--user", 345 | "pve" 346 | ], 347 | "stdout": [ 348 | "Running: k3sup install", 349 | "Public IP: 192.168.1.51", 350 | "[INFO] Using v1.27.11+k3s1 as 
release", 351 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 352 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 353 | "[INFO] Verifying binary download", 354 | "[INFO] Installing k3s to /usr/local/bin/k3s", 355 | "[INFO] Skipping installation of SELinux RPM", 356 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 357 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 358 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 359 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 360 | "[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh", 361 | "[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env", 362 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s.service", 363 | "[INFO] systemd: Enabling k3s unit", 364 | "[INFO] systemd: Starting k3s", 365 | "Result: [INFO] Using v1.27.11+k3s1 as release", 366 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 367 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 368 | "[INFO] Verifying binary download", 369 | "[INFO] Installing k3s to /usr/local/bin/k3s", 370 | "[INFO] Skipping installation of SELinux RPM", 371 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 372 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 373 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 374 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 375 | "[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh", 376 | "[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env", 377 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s.service", 378 | "[INFO] systemd: Enabling k3s unit", 379 | "[INFO] systemd: Starting k3s", 380 | " Created symlink 
/etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.", 381 | "", 382 | "Saving file to: /home/vscode/.kube/config", 383 | "", 384 | "# Test your cluster with:", 385 | "export KUBECONFIG=/home/vscode/.kube/config", 386 | "kubectl config use-context k3s-ha", 387 | "kubectl get node -o wide", 388 | "", 389 | "🚀 Speed up GitHub Actions/GitLab CI + reduce costs: https://actuated.dev" 390 | ] 391 | } 392 | } 393 | 394 | PLAY [Setup kube-vip] ********************************************************** 395 | 396 | TASK [Setup kube-vip] ********************************************************** 397 | 398 | TASK [sysengquick.k3s.k3sup : Include kube_vip tasks] ************************** 399 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/k3sup/tasks/setup_kube_vip_tasks.yml for s1.k3s.local.technoplaza.net 400 | 401 | TASK [sysengquick.k3s.k3sup : Apply kube-vip RBAC manifest] ******************** 402 | changed: [s1.k3s.local.technoplaza.net] 403 | 404 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 405 | ok: [s1.k3s.local.technoplaza.net] => { 406 | "msg": { 407 | "cmd": [ 408 | "/usr/local/bin/kubectl", 409 | "apply", 410 | "-f", 411 | "https://kube-vip.io/manifests/rbac.yaml" 412 | ], 413 | "stdout": [ 414 | "serviceaccount/kube-vip created", 415 | "clusterrole.rbac.authorization.k8s.io/system:kube-vip-role created", 416 | "clusterrolebinding.rbac.authorization.k8s.io/system:kube-vip-binding created" 417 | ] 418 | } 419 | } 420 | 421 | TASK [sysengquick.k3s.k3sup : Fetch kube-vip container image] ****************** 422 | changed: [s1.k3s.local.technoplaza.net] 423 | 424 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 425 | ok: [s1.k3s.local.technoplaza.net] => { 426 | "msg": { 427 | "cmd": [ 428 | "/usr/local/bin/ctr", 429 | "image", 430 | "pull", 431 | "ghcr.io/kube-vip/kube-vip:v0.7.1" 432 | ], 433 | "stdout": [ 434 | 
"ghcr.io/kube-vip/kube-vip:v0.7.1: resolving |\u001b[32m\u001b[0m--------------------------------------| ", 435 | "elapsed: 0.1 s total: 0.0 B (0.0 B/s) ", 436 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolving |\u001b[32m\u001b[0m--------------------------------------| ", 437 | "elapsed: 0.2 s total: 0.0 B (0.0 B/s) ", 438 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 439 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: waiting |\u001b[32m\u001b[0m--------------------------------------| ", 440 | "elapsed: 0.3 s total: 0.0 B (0.0 B/s) ", 441 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 442 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/3.8 KiB ", 443 | "elapsed: 0.4 s total: 0.0 B (0.0 B/s) ", 444 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 445 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/3.8 KiB ", 446 | "elapsed: 0.5 s total: 0.0 B (0.0 B/s) ", 447 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 448 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/3.8 KiB ", 449 | "elapsed: 0.6 s total: 0.0 B (0.0 B/s) ", 450 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 451 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 452 | "manifest-sha256:58ce44dc60694b0aa547d87d4a8337133961d3a8538021a672ba9bd33b267c9a: 
downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/672.0 B ", 453 | "elapsed: 0.7 s total: 3.8 Ki (5.4 KiB/s) ", 454 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 455 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 456 | "manifest-sha256:58ce44dc60694b0aa547d87d4a8337133961d3a8538021a672ba9bd33b267c9a: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 457 | "config-sha256:22aaebb38f4a9f54562fab7b3a59b206e32f59a368c5749c96d06f5a1c187dba: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/875.0 B ", 458 | "layer-sha256:a220db869ed1ac1f9b2c0609be138bed84b2ed0e7a94d10cd3a05e8eaefc0a52: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/14.8 MiB ", 459 | "layer-sha256:d058a25998cd9089af4fa61a2abeb7d820cd93ecc3edaf215c0f9b7291600077: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/120.7 KiB ", 460 | "elapsed: 0.8 s total: 4.5 Ki (5.6 KiB/s) ", 461 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 462 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 463 | "manifest-sha256:58ce44dc60694b0aa547d87d4a8337133961d3a8538021a672ba9bd33b267c9a: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 464 | "config-sha256:22aaebb38f4a9f54562fab7b3a59b206e32f59a368c5749c96d06f5a1c187dba: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/875.0 B ", 465 | "layer-sha256:a220db869ed1ac1f9b2c0609be138bed84b2ed0e7a94d10cd3a05e8eaefc0a52: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/14.8 MiB ", 466 | 
"layer-sha256:d058a25998cd9089af4fa61a2abeb7d820cd93ecc3edaf215c0f9b7291600077: downloading |\u001b[32m\u001b[0m--------------------------------------| 0.0 B/120.7 KiB ", 467 | "elapsed: 0.9 s total: 4.5 Ki (4.9 KiB/s) ", 468 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 469 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 470 | "manifest-sha256:58ce44dc60694b0aa547d87d4a8337133961d3a8538021a672ba9bd33b267c9a: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 471 | "config-sha256:22aaebb38f4a9f54562fab7b3a59b206e32f59a368c5749c96d06f5a1c187dba: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 472 | "layer-sha256:a220db869ed1ac1f9b2c0609be138bed84b2ed0e7a94d10cd3a05e8eaefc0a52: downloading |\u001b[32m+++++++\u001b[0m-------------------------------| 3.0 MiB/14.8 MiB ", 473 | "layer-sha256:d058a25998cd9089af4fa61a2abeb7d820cd93ecc3edaf215c0f9b7291600077: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 474 | "elapsed: 1.0 s total: 3.1 Mi (3.1 MiB/s) ", 475 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 476 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 477 | "manifest-sha256:58ce44dc60694b0aa547d87d4a8337133961d3a8538021a672ba9bd33b267c9a: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 478 | "config-sha256:22aaebb38f4a9f54562fab7b3a59b206e32f59a368c5749c96d06f5a1c187dba: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 479 | "layer-sha256:a220db869ed1ac1f9b2c0609be138bed84b2ed0e7a94d10cd3a05e8eaefc0a52: downloading |\u001b[32m+++++++++++++++++++++++++++++++++++\u001b[0m---| 14.0 MiB/14.8 MiB ", 480 | 
"layer-sha256:d058a25998cd9089af4fa61a2abeb7d820cd93ecc3edaf215c0f9b7291600077: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 481 | "elapsed: 1.1 s total: 14.1 M (12.8 MiB/s) ", 482 | "ghcr.io/kube-vip/kube-vip:v0.7.1: resolved |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 483 | "index-sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 484 | "manifest-sha256:58ce44dc60694b0aa547d87d4a8337133961d3a8538021a672ba9bd33b267c9a: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 485 | "config-sha256:22aaebb38f4a9f54562fab7b3a59b206e32f59a368c5749c96d06f5a1c187dba: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 486 | "layer-sha256:a220db869ed1ac1f9b2c0609be138bed84b2ed0e7a94d10cd3a05e8eaefc0a52: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 487 | "layer-sha256:d058a25998cd9089af4fa61a2abeb7d820cd93ecc3edaf215c0f9b7291600077: done |\u001b[32m++++++++++++++++++++++++++++++++++++++\u001b[0m| ", 488 | "elapsed: 1.2 s total: 14.1 M (11.8 MiB/s) ", 489 | "unpacking linux/amd64 sha256:82698885b3b5f926cd940b7000549f3d43850cb6565a708162900c1475a83016...", 490 | "done: 269.593652ms\t" 491 | ] 492 | } 493 | } 494 | 495 | TASK [sysengquick.k3s.k3sup : Apply kube-vip daemonset manifest] *************** 496 | changed: [s1.k3s.local.technoplaza.net] 497 | 498 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 499 | ok: [s1.k3s.local.technoplaza.net] => { 500 | "msg": { 501 | "cmd": "set -o pipefail && /usr/local/bin/ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:v0.7.1 vip /kube-vip manifest daemonset --address 192.168.1.50 --arp --controlplane --inCluster --interface eth0 --leaderElection --leaseDuration 30 --leaseRenewDuration 10 --leaseRetry 3 --services --taint | tee /var/lib/rancher/k3s/server/manifests/kube-vip.yaml", 502 | "stdout": [ 503 | "apiVersion: 
apps/v1", 504 | "kind: DaemonSet", 505 | "metadata:", 506 | " creationTimestamp: null", 507 | " labels:", 508 | " app.kubernetes.io/name: kube-vip-ds", 509 | " app.kubernetes.io/version: v0.7.1", 510 | " name: kube-vip-ds", 511 | " namespace: kube-system", 512 | "spec:", 513 | " selector:", 514 | " matchLabels:", 515 | " app.kubernetes.io/name: kube-vip-ds", 516 | " template:", 517 | " metadata:", 518 | " creationTimestamp: null", 519 | " labels:", 520 | " app.kubernetes.io/name: kube-vip-ds", 521 | " app.kubernetes.io/version: v0.7.1", 522 | " spec:", 523 | " affinity:", 524 | " nodeAffinity:", 525 | " requiredDuringSchedulingIgnoredDuringExecution:", 526 | " nodeSelectorTerms:", 527 | " - matchExpressions:", 528 | " - key: node-role.kubernetes.io/master", 529 | " operator: Exists", 530 | " - matchExpressions:", 531 | " - key: node-role.kubernetes.io/control-plane", 532 | " operator: Exists", 533 | " containers:", 534 | " - args:", 535 | " - manager", 536 | " env:", 537 | " - name: vip_arp", 538 | " value: \"true\"", 539 | " - name: port", 540 | " value: \"6443\"", 541 | " - name: vip_interface", 542 | " value: eth0", 543 | " - name: vip_cidr", 544 | " value: \"32\"", 545 | " - name: dns_mode", 546 | " value: first", 547 | " - name: cp_enable", 548 | " value: \"true\"", 549 | " - name: cp_namespace", 550 | " value: kube-system", 551 | " - name: svc_enable", 552 | " value: \"true\"", 553 | " - name: svc_leasename", 554 | " value: plndr-svcs-lock", 555 | " - name: vip_leaderelection", 556 | " value: \"true\"", 557 | " - name: vip_leasename", 558 | " value: plndr-cp-lock", 559 | " - name: vip_leaseduration", 560 | " value: \"30\"", 561 | " - name: vip_renewdeadline", 562 | " value: \"10\"", 563 | " - name: vip_retryperiod", 564 | " value: \"3\"", 565 | " - name: address", 566 | " value: 192.168.1.50", 567 | " - name: prometheus_server", 568 | " value: :2112", 569 | " image: ghcr.io/kube-vip/kube-vip:v0.7.1", 570 | " imagePullPolicy: Always", 571 | " name: kube-vip", 
572 | " resources: {}", 573 | " securityContext:", 574 | " capabilities:", 575 | " add:", 576 | " - NET_ADMIN", 577 | " - NET_RAW", 578 | " hostNetwork: true", 579 | " serviceAccountName: kube-vip", 580 | " tolerations:", 581 | " - effect: NoSchedule", 582 | " operator: Exists", 583 | " - effect: NoExecute", 584 | " operator: Exists", 585 | " updateStrategy: {}", 586 | "status:", 587 | " currentNumberScheduled: 0", 588 | " desiredNumberScheduled: 0", 589 | " numberMisscheduled: 0", 590 | " numberReady: 0" 591 | ] 592 | } 593 | } 594 | 595 | TASK [sysengquick.k3s.k3sup : Update kubectl config] *************************** 596 | changed: [s1.k3s.local.technoplaza.net -> localhost] 597 | 598 | TASK [Setup kube-vip cloud controller] ***************************************** 599 | 600 | TASK [sysengquick.k3s.k3sup : Include setup_kube_vip_cloud_controller_tasks] *** 601 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/k3sup/tasks/setup_kube_vip_cloud_controller_tasks.yml for s1.k3s.local.technoplaza.net 602 | 603 | TASK [sysengquick.k3s.k3sup : Apply kube-vip cloud controller manifest] ******** 604 | changed: [s1.k3s.local.technoplaza.net] 605 | 606 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 607 | ok: [s1.k3s.local.technoplaza.net] => { 608 | "msg": { 609 | "cmd": [ 610 | "/usr/local/bin/kubectl", 611 | "apply", 612 | "-f", 613 | "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml" 614 | ], 615 | "stdout": [ 616 | "serviceaccount/kube-vip-cloud-controller created", 617 | "clusterrole.rbac.authorization.k8s.io/system:kube-vip-cloud-controller-role created", 618 | "clusterrolebinding.rbac.authorization.k8s.io/system:kube-vip-cloud-controller-binding created", 619 | "deployment.apps/kube-vip-cloud-provider created" 620 | ] 621 | } 622 | } 623 | 624 | TASK [sysengquick.k3s.k3sup : Apply IP Range to cloud controller] ************** 625 | 
changed: [s1.k3s.local.technoplaza.net] 626 | 627 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 628 | ok: [s1.k3s.local.technoplaza.net] => { 629 | "msg": { 630 | "cmd": "set -o pipefail && /usr/local/bin/kubectl create configmap -n kube-system kubevip -o yaml --dry-run=client --from-literal range-global=192.168.1.60-192.168.1.69 | /usr/local/bin/kubectl apply -f -", 631 | "stdout": [ 632 | "configmap/kubevip created" 633 | ] 634 | } 635 | } 636 | 637 | PLAY [Add additional nodes to k3s cluster] ************************************* 638 | 639 | TASK [Add additional servers to cluster] *************************************** 640 | 641 | TASK [sysengquick.k3s.k3sup : Add servers to k3s cluster] ********************** 642 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/k3sup/tasks/add_servers_loop.yml for localhost => (item={'name': 's2.k3s.local.technoplaza.net', 'ip': '192.168.1.52'}) 643 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/k3sup/tasks/add_servers_loop.yml for localhost => (item={'name': 's3.k3s.local.technoplaza.net', 'ip': '192.168.1.53'}) 644 | 645 | TASK [sysengquick.k3s.k3sup : Add server to k3s cluster] *********************** 646 | changed: [localhost] 647 | 648 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 649 | ok: [localhost] => { 650 | "msg": { 651 | "cmd": [ 652 | "/usr/local/bin/k3sup", 653 | "join", 654 | "--ip", 655 | "192.168.1.52", 656 | "--k3s-extra-args", 657 | "--disable servicelb", 658 | "--k3s-version", 659 | "v1.27.11+k3s1", 660 | "--server", 661 | "--server-ip", 662 | "192.168.1.51", 663 | "--ssh-key", 664 | "/home/vscode/.ssh/id_ed25519", 665 | "--tls-san", 666 | "192.168.1.50", 667 | "--user", 668 | "pve" 669 | ], 670 | "stdout": [ 671 | "Running: k3sup join", 672 | "Joining 192.168.1.52 => 192.168.1.51", 673 | "Received node-token from 192.168.1.51.. 
ok.", 674 | "[INFO] Using v1.27.11+k3s1 as release", 675 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 676 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 677 | "[INFO] Verifying binary download", 678 | "[INFO] Installing k3s to /usr/local/bin/k3s", 679 | "[INFO] Skipping installation of SELinux RPM", 680 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 681 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 682 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 683 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 684 | "[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh", 685 | "[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env", 686 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s.service", 687 | "[INFO] systemd: Enabling k3s unit", 688 | "[INFO] systemd: Starting k3s", 689 | "Logs: Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.", 690 | "Output: [INFO] Using v1.27.11+k3s1 as release", 691 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 692 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 693 | "[INFO] Verifying binary download", 694 | "[INFO] Installing k3s to /usr/local/bin/k3s", 695 | "[INFO] Skipping installation of SELinux RPM", 696 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 697 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 698 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 699 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 700 | "[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh", 701 | "[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env", 702 | "[INFO] systemd: Creating service file 
/etc/systemd/system/k3s.service", 703 | "[INFO] systemd: Enabling k3s unit", 704 | "[INFO] systemd: Starting k3s", 705 | "", 706 | "🚀 Speed up GitHub Actions/GitLab CI + reduce costs: https://actuated.dev" 707 | ] 708 | } 709 | } 710 | 711 | TASK [sysengquick.k3s.k3sup : Add server to k3s cluster] *********************** 712 | changed: [localhost] 713 | 714 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 715 | ok: [localhost] => { 716 | "msg": { 717 | "cmd": [ 718 | "/usr/local/bin/k3sup", 719 | "join", 720 | "--ip", 721 | "192.168.1.53", 722 | "--k3s-extra-args", 723 | "--disable servicelb", 724 | "--k3s-version", 725 | "v1.27.11+k3s1", 726 | "--server", 727 | "--server-ip", 728 | "192.168.1.51", 729 | "--ssh-key", 730 | "/home/vscode/.ssh/id_ed25519", 731 | "--tls-san", 732 | "192.168.1.50", 733 | "--user", 734 | "pve" 735 | ], 736 | "stdout": [ 737 | "Running: k3sup join", 738 | "Joining 192.168.1.53 => 192.168.1.51", 739 | "Received node-token from 192.168.1.51.. 
ok.", 740 | "[INFO] Using v1.27.11+k3s1 as release", 741 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 742 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 743 | "[INFO] Verifying binary download", 744 | "[INFO] Installing k3s to /usr/local/bin/k3s", 745 | "[INFO] Skipping installation of SELinux RPM", 746 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 747 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 748 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 749 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 750 | "[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh", 751 | "[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env", 752 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s.service", 753 | "[INFO] systemd: Enabling k3s unit", 754 | "[INFO] systemd: Starting k3s", 755 | "Logs: Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.", 756 | "Output: [INFO] Using v1.27.11+k3s1 as release", 757 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 758 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 759 | "[INFO] Verifying binary download", 760 | "[INFO] Installing k3s to /usr/local/bin/k3s", 761 | "[INFO] Skipping installation of SELinux RPM", 762 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 763 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 764 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 765 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 766 | "[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh", 767 | "[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env", 768 | "[INFO] systemd: Creating service file 
/etc/systemd/system/k3s.service", 769 | "[INFO] systemd: Enabling k3s unit", 770 | "[INFO] systemd: Starting k3s", 771 | "", 772 | "🚀 Speed up GitHub Actions/GitLab CI + reduce costs: https://actuated.dev" 773 | ] 774 | } 775 | } 776 | 777 | TASK [Add workers to cluster] ************************************************** 778 | 779 | TASK [sysengquick.k3s.k3sup : Add workers to k3s cluster] ********************** 780 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/k3sup/tasks/add_workers_loop.yml for localhost => (item={'name': 'w1.k3s.local.technoplaza.net', 'ip': '192.168.1.54'}) 781 | included: /home/vscode/.ansible/collections/ansible_collections/sysengquick/k3s/roles/k3sup/tasks/add_workers_loop.yml for localhost => (item={'name': 'w2.k3s.local.technoplaza.net', 'ip': '192.168.1.55'}) 782 | 783 | TASK [sysengquick.k3s.k3sup : Add worker to k3s cluster] *********************** 784 | changed: [localhost] 785 | 786 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 787 | ok: [localhost] => { 788 | "msg": { 789 | "cmd": [ 790 | "/usr/local/bin/k3sup", 791 | "join", 792 | "--ip", 793 | "192.168.1.54", 794 | "--k3s-version", 795 | "v1.27.11+k3s1", 796 | "--server-ip", 797 | "192.168.1.51", 798 | "--ssh-key", 799 | "/home/vscode/.ssh/id_ed25519", 800 | "--user", 801 | "pve" 802 | ], 803 | "stdout": [ 804 | "Running: k3sup join", 805 | "Joining 192.168.1.54 => 192.168.1.51", 806 | "Received node-token from 192.168.1.51.. 
ok.", 807 | "[INFO] Using v1.27.11+k3s1 as release", 808 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 809 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 810 | "[INFO] Verifying binary download", 811 | "[INFO] Installing k3s to /usr/local/bin/k3s", 812 | "[INFO] Skipping installation of SELinux RPM", 813 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 814 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 815 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 816 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 817 | "[INFO] Creating uninstall script /usr/local/bin/k3s-agent-uninstall.sh", 818 | "[INFO] env: Creating environment file /etc/systemd/system/k3s-agent.service.env", 819 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s-agent.service", 820 | "[INFO] systemd: Enabling k3s-agent unit", 821 | "[INFO] systemd: Starting k3s-agent", 822 | "Logs: Created symlink /etc/systemd/system/multi-user.target.wants/k3s-agent.service → /etc/systemd/system/k3s-agent.service.", 823 | "Output: [INFO] Using v1.27.11+k3s1 as release", 824 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 825 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 826 | "[INFO] Verifying binary download", 827 | "[INFO] Installing k3s to /usr/local/bin/k3s", 828 | "[INFO] Skipping installation of SELinux RPM", 829 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 830 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 831 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 832 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 833 | "[INFO] Creating uninstall script /usr/local/bin/k3s-agent-uninstall.sh", 834 | "[INFO] env: Creating environment file /etc/systemd/system/k3s-agent.service.env", 835 | 
"[INFO] systemd: Creating service file /etc/systemd/system/k3s-agent.service", 836 | "[INFO] systemd: Enabling k3s-agent unit", 837 | "[INFO] systemd: Starting k3s-agent", 838 | "", 839 | "🚀 Speed up GitHub Actions/GitLab CI + reduce costs: https://actuated.dev" 840 | ] 841 | } 842 | } 843 | 844 | TASK [sysengquick.k3s.k3sup : Add worker to k3s cluster] *********************** 845 | changed: [localhost] 846 | 847 | TASK [sysengquick.k3s.k3sup : Display output] ********************************** 848 | ok: [localhost] => { 849 | "msg": { 850 | "cmd": [ 851 | "/usr/local/bin/k3sup", 852 | "join", 853 | "--ip", 854 | "192.168.1.55", 855 | "--k3s-version", 856 | "v1.27.11+k3s1", 857 | "--server-ip", 858 | "192.168.1.51", 859 | "--ssh-key", 860 | "/home/vscode/.ssh/id_ed25519", 861 | "--user", 862 | "pve" 863 | ], 864 | "stdout": [ 865 | "Running: k3sup join", 866 | "Joining 192.168.1.55 => 192.168.1.51", 867 | "Received node-token from 192.168.1.51.. ok.", 868 | "[INFO] Using v1.27.11+k3s1 as release", 869 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 870 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 871 | "[INFO] Verifying binary download", 872 | "[INFO] Installing k3s to /usr/local/bin/k3s", 873 | "[INFO] Skipping installation of SELinux RPM", 874 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 875 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 876 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 877 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 878 | "[INFO] Creating uninstall script /usr/local/bin/k3s-agent-uninstall.sh", 879 | "[INFO] env: Creating environment file /etc/systemd/system/k3s-agent.service.env", 880 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s-agent.service", 881 | "[INFO] systemd: Enabling k3s-agent unit", 882 | "[INFO] systemd: Starting k3s-agent", 883 | "Logs: Created 
symlink /etc/systemd/system/multi-user.target.wants/k3s-agent.service → /etc/systemd/system/k3s-agent.service.", 884 | "Output: [INFO] Using v1.27.11+k3s1 as release", 885 | "[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/sha256sum-amd64.txt", 886 | "[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.27.11+k3s1/k3s", 887 | "[INFO] Verifying binary download", 888 | "[INFO] Installing k3s to /usr/local/bin/k3s", 889 | "[INFO] Skipping installation of SELinux RPM", 890 | "[INFO] Creating /usr/local/bin/kubectl symlink to k3s", 891 | "[INFO] Creating /usr/local/bin/crictl symlink to k3s", 892 | "[INFO] Creating /usr/local/bin/ctr symlink to k3s", 893 | "[INFO] Creating killall script /usr/local/bin/k3s-killall.sh", 894 | "[INFO] Creating uninstall script /usr/local/bin/k3s-agent-uninstall.sh", 895 | "[INFO] env: Creating environment file /etc/systemd/system/k3s-agent.service.env", 896 | "[INFO] systemd: Creating service file /etc/systemd/system/k3s-agent.service", 897 | "[INFO] systemd: Enabling k3s-agent unit", 898 | "[INFO] systemd: Starting k3s-agent", 899 | "", 900 | "🚀 Speed up GitHub Actions/GitLab CI + reduce costs: https://actuated.dev" 901 | ] 902 | } 903 | } 904 | 905 | PLAY RECAP ********************************************************************* 906 | localhost : ok=14 changed=5 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 907 | s1.k3s.local.technoplaza.net : ok=13 changed=6 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 908 | 909 | 910 | real 0m53.615s 911 | user 0m6.208s 912 | sys 0m1.609s 913 | + ansible-playbook sysengquick.k3s.deploy_rancher 914 | 915 | PLAY [Deploy rancher] ********************************************************** 916 | 917 | TASK [Deploy cert-manager] ***************************************************** 918 | 919 | TASK [sysengquick.k3s.rancher : Deploy cert-manager with helm] ***************** 920 | changed: [localhost] 921 | 922 | TASK 
[sysengquick.k3s.rancher : Display output] ******************************** 923 | ok: [localhost] => { 924 | "msg": { 925 | "cmd": [ 926 | "/usr/sbin/helm", 927 | "upgrade", 928 | "--install", 929 | "cert-manager", 930 | "jetstack/cert-manager", 931 | "--create-namespace", 932 | "--namespace", 933 | "cert-manager", 934 | "--set", 935 | "installCRDs=true", 936 | "--version", 937 | "v1.14.4" 938 | ], 939 | "stdout": [ 940 | "Release \"cert-manager\" does not exist. Installing it now.", 941 | "NAME: cert-manager", 942 | "LAST DEPLOYED: Fri Mar 29 17:36:36 2024", 943 | "NAMESPACE: cert-manager", 944 | "STATUS: deployed", 945 | "REVISION: 1", 946 | "TEST SUITE: None", 947 | "NOTES:", 948 | "cert-manager v1.14.4 has been deployed successfully!", 949 | "", 950 | "In order to begin issuing certificates, you will need to set up a ClusterIssuer", 951 | "or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).", 952 | "", 953 | "More information on the different types of issuers and how to configure them", 954 | "can be found in our documentation:", 955 | "", 956 | "https://cert-manager.io/docs/configuration/", 957 | "", 958 | "For information on how to configure cert-manager to automatically provision", 959 | "Certificates for Ingress resources, take a look at the `ingress-shim`", 960 | "documentation:", 961 | "", 962 | "https://cert-manager.io/docs/usage/ingress/" 963 | ] 964 | } 965 | } 966 | 967 | TASK [Deploy rancher] ********************************************************** 968 | 969 | TASK [sysengquick.k3s.rancher : Deploy rancher with helm] ********************** 970 | changed: [localhost] 971 | 972 | TASK [sysengquick.k3s.rancher : Display output] ******************************** 973 | ok: [localhost] => { 974 | "msg": { 975 | "cmd": [ 976 | "/usr/sbin/helm", 977 | "upgrade", 978 | "--install", 979 | "rancher", 980 | "rancher-stable/rancher", 981 | "--create-namespace", 982 | "--namespace", 983 | "cattle-system", 984 | "--set", 985 | 
"bootstrapPassword=AReallyGoodP@ssw0rd", 986 | "--set", 987 | "hostname=rancher.k3s.local.technoplaza.net", 988 | "--set", 989 | "replicas=3", 990 | "--version", 991 | "2.8.2" 992 | ], 993 | "stdout": [ 994 | "Release \"rancher\" does not exist. Installing it now.", 995 | "NAME: rancher", 996 | "LAST DEPLOYED: Fri Mar 29 17:36:55 2024", 997 | "NAMESPACE: cattle-system", 998 | "STATUS: deployed", 999 | "REVISION: 1", 1000 | "TEST SUITE: None", 1001 | "NOTES:", 1002 | "Rancher Server has been installed.", 1003 | "", 1004 | "NOTE: Rancher may take several minutes to fully initialize. Please standby while Certificates are being issued, Containers are started and the Ingress rule comes up.", 1005 | "", 1006 | "Check out our docs at https://rancher.com/docs/", 1007 | "", 1008 | "If you provided your own bootstrap password during installation, browse to https://rancher.k3s.local.technoplaza.net to get started.", 1009 | "", 1010 | "If this is the first time you installed Rancher, get started by running this command and clicking the URL it generates:", 1011 | "", 1012 | "```", 1013 | "echo https://rancher.k3s.local.technoplaza.net/dashboard/?setup=$(kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}')", 1014 | "```", 1015 | "", 1016 | "To get just the bootstrap password on its own, run:", 1017 | "", 1018 | "```", 1019 | "kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ \"\\n\" }}'", 1020 | "```", 1021 | "", 1022 | "", 1023 | "Happy Containering!" 
1024 | ] 1025 | } 1026 | } 1027 | 1028 | TASK [sysengquick.k3s.rancher : Expose rancher load balancer service] ********** 1029 | skipping: [localhost] 1030 | 1031 | TASK [sysengquick.k3s.rancher : Display output] ******************************** 1032 | skipping: [localhost] 1033 | 1034 | PLAY RECAP ********************************************************************* 1035 | localhost : ok=4 changed=2 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 1036 | 1037 | 1038 | real 0m22.512s 1039 | user 0m5.658s 1040 | sys 0m0.871s 1041 | + kubectl -n cattle-system rollout status deploy/rancher 1042 | Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... 1043 | Waiting for deployment spec update to be observed... 1044 | Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... 1045 | Waiting for deployment "rancher" rollout to finish: 1 of 3 updated replicas are available... 1046 | Waiting for deployment "rancher" rollout to finish: 2 of 3 updated replicas are available... 
1047 | deployment "rancher" successfully rolled out 1048 | 1049 | real 1m10.728s 1050 | user 0m0.156s 1051 | sys 0m0.028s 1052 | + kubectl get nodes -o wide 1053 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 1054 | s1 Ready control-plane,etcd,master 2m18s v1.27.11+k3s1 192.168.1.51 Ubuntu 22.04.4 LTS 5.15.0-1053-kvm containerd://1.7.11-k3s2.27 1055 | s2 Ready control-plane,etcd,master 117s v1.27.11+k3s1 192.168.1.52 Ubuntu 22.04.4 LTS 5.15.0-1053-kvm containerd://1.7.11-k3s2.27 1056 | s3 Ready control-plane,etcd,master 106s v1.27.11+k3s1 192.168.1.53 Ubuntu 22.04.4 LTS 5.15.0-1053-kvm containerd://1.7.11-k3s2.27 1057 | w1 Ready 100s v1.27.11+k3s1 192.168.1.54 Ubuntu 22.04.4 LTS 5.15.0-1053-kvm containerd://1.7.11-k3s2.27 1058 | w2 Ready 94s v1.27.11+k3s1 192.168.1.55 Ubuntu 22.04.4 LTS 5.15.0-1053-kvm containerd://1.7.11-k3s2.27 1059 | + kubectl get pods --all-namespaces -o wide 1060 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 1061 | cattle-fleet-system fleet-controller-6fc8c65685-cmqt6 1/1 Running 0 102m 10.42.3.4 w1 1062 | cattle-fleet-system gitjob-6c8875d84d-fv2mp 1/1 Running 0 102m 10.42.1.3 s2 1063 | cattle-provisioning-capi-system capi-controller-manager-79ddf9b9d8-f7cq4 1/1 Running 0 100m 10.42.4.8 w2 1064 | cattle-system helm-operation-nrwh4 0/2 Completed 0 85s 10.42.2.8 s3 1065 | cattle-system rancher-7bcd45c474-qzcjl 1/1 Running 0 103m 10.42.2.3 s3 1066 | cattle-system rancher-7bcd45c474-tc8w8 1/1 Running 0 103m 10.42.1.2 s2 1067 | cattle-system rancher-7bcd45c474-ztwq2 1/1 Running 0 103m 10.42.4.4 w2 1068 | cattle-system rancher-webhook-7476c74c6c-6z5n5 1/1 Running 0 101m 10.42.4.7 w2 1069 | cert-manager cert-manager-5f8646db6b-tdbp4 1/1 Running 0 103m 10.42.3.3 w1 1070 | cert-manager cert-manager-cainjector-5cf5f57dd7-66bnh 1/1 Running 0 103m 10.42.3.2 w1 1071 | cert-manager cert-manager-webhook-687b7f8b97-rb8tv 1/1 Running 0 103m 10.42.4.2 w2 1072 | kube-system 
coredns-77ccd57875-fgz4g 1/1 Running 0 104m 10.42.0.6 s1 1073 | kube-system helm-install-traefik-crd-zm2cw 0/1 Completed 0 104m 10.42.0.5 s1 1074 | kube-system helm-install-traefik-rxx5r 0/1 Completed 2 104m 10.42.0.2 s1 1075 | kube-system kube-vip-cloud-provider-578d9b7bf7-c5pp4 1/1 Running 0 104m 10.42.0.7 s1 1076 | kube-system kube-vip-ds-547ps 1/1 Running 0 103m 192.168.1.53 s3 1077 | kube-system kube-vip-ds-67z2p 1/1 Running 0 104m 192.168.1.51 s1 1078 | kube-system kube-vip-ds-nbkmr 1/1 Running 0 104m 192.168.1.52 s2 1079 | kube-system local-path-provisioner-79ffd768b5-b6x9r 1/1 Running 0 104m 10.42.0.4 s1 1080 | kube-system metrics-server-648b5df564-mk4jq 1/1 Running 0 104m 10.42.0.3 s1 1081 | kube-system traefik-768bdcdcdd-klkjm 1/1 Running 0 103m 10.42.2.2 s3 1082 | + kubectl -n cattle-system get deploy rancher 1083 | NAME READY UP-TO-DATE AVAILABLE AGE 1084 | rancher 3/3 3 3 72s 1085 | 1086 | real 5m26.746s 1087 | user 0m41.468s 1088 | sys 0m8.960s 1089 | --------------------------------------------------------------------------------