├── .envrc ├── .gitignore ├── .taskfiles ├── Taskfile_ansible.yml ├── Taskfile_darwin.yml ├── Taskfile_linux.yml └── Taskfile_vagrant.yml ├── .vagrant_ssh_key ├── README.md ├── Taskfile.yml ├── Vagrantfile ├── ansible.cfg ├── inventory ├── homelab │ ├── group_vars │ │ ├── all │ │ │ ├── ansible-settings.yml │ │ │ ├── cni-settings.yml │ │ │ ├── k3s-settings.yml │ │ │ ├── keepalived-settings.yml │ │ │ ├── meta-settings.yml │ │ │ ├── os-settings.yml │ │ │ ├── registry-settings.yml │ │ │ └── rsyslog-settings.yml │ │ ├── master │ │ │ ├── k3s-settings.yml │ │ │ └── meta-settings.yml │ │ └── worker │ │ │ └── k3s-settings.yml │ ├── host_vars │ │ ├── k8s-master-a.yml │ │ ├── k8s-master-b.yml │ │ ├── k8s-master-c.yml │ │ ├── k8s-worker-a.yml │ │ └── k8s-worker-f.yml │ └── hosts.yml └── vagrant │ ├── group_vars │ └── all.yml │ ├── host_vars │ ├── k8s-master.yml │ ├── k8s-worker-a.yml │ └── k8s-worker-b.yml │ └── hosts.yml ├── playbooks ├── k3s-build.yml ├── k3s-nuke.yml ├── k3s-upgrade.yml ├── os-longhorn.yml ├── os-prepare.yml ├── os-rsyslog.yml └── rook-ceph-nuke.yml ├── requirements.yml ├── roles ├── k3s │ ├── tasks │ │ ├── cni-calico.yml │ │ ├── cni-cilium-kube-router.yml │ │ ├── kubeconfig.yml │ │ ├── main.yml │ │ ├── meta.yml │ │ ├── prometheus-operator-crds.yml │ │ └── registry.yml │ ├── templates │ │ ├── calico-bgpconfiguration.yaml.j2 │ │ ├── calico-bgppeer.yaml.j2 │ │ ├── calico-ebpf-configmap.yaml.j2 │ │ ├── calico-installation.yaml.j2 │ │ ├── cilium-etcd-secrets.yaml.j2 │ │ ├── cilium-helmchart.yaml.j2 │ │ ├── kube-router.yaml.j2 │ │ └── registries.yaml.j2 │ └── vars │ │ └── main.yml ├── longhorn │ └── tasks │ │ ├── disks.yml │ │ ├── main.yml │ │ ├── meta.yml │ │ └── multipath.yml └── os │ ├── handlers │ └── main.yml │ ├── tasks │ ├── filesystem.yml │ ├── host.yml │ ├── keepalived.yml │ ├── kernel.yml │ ├── main.yml │ ├── motd.yml │ ├── network.yml │ ├── packages.yml │ ├── power-button.yml │ ├── rsyslog.yml │ ├── time.yml │ ├── unattended-upgrades.yml │ └── user.yml │ ├── templates │ └── keepalived.conf.j2 │ └── vars │ └── main.yml └── tests ├── TEST.md ├── banana.yaml ├── cilium-values.yaml ├── home-assistant-values.yaml ├── kube-router.yaml ├── longhorn-values.yaml ├── metallb-values.yaml ├── nginx-deployment.yaml ├── nginx-ingress-values.yaml └── repos.yaml /.envrc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export KUBECONFIG=./kubeconfig 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.vdi 2 | *.log 3 | .vagrant 4 | .DS_Store 5 | kubeconfig 6 | .vscode 7 | .tasks 8 | ignore 9 | 10 | roles/xanmanning.k3s* 11 | -------------------------------------------------------------------------------- /.taskfiles/Taskfile_ansible.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | vars: 5 | ANSIBLE_HOSTS: inventory/local/hosts.yml 6 | 7 | tasks: 8 | 9 | deps: 10 | desc: Install Ansible Galaxy dependencies 11 | cmds: 12 | - ansible-galaxy install -r requirements.yml 13 | 14 | ping: 15 | desc: Ping all the nodes 16 | cmds: 17 | - ansible all -i {{.ANSIBLE_HOSTS}} --one-line -m ping 18 | 19 | list: 20 | desc: List all the nodes 21 | cmds: 22 | - ansible all -i {{.ANSIBLE_HOSTS}} --list-hosts 23 | 24 | reboot: 25 | desc: Reboot all the nodes 26 | cmds: 27 | - ansible all -i {{.ANSIBLE_HOSTS}} -b -m shell -a "/sbin/shutdown -r now" 28 
| 29 | shutdown: 30 | desc: Shutdown all the nodes 31 | cmds: 32 | - ansible all -i {{.ANSIBLE_HOSTS}} -b -m shell -a "/sbin/shutdown -h now" 33 | -------------------------------------------------------------------------------- /.taskfiles/Taskfile_darwin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | tasks: 5 | 6 | install: 7 | desc: Install required Homebrew apps 8 | cmds: 9 | - brew install ansible kubernetes-cli helm 10 | 11 | install:dev: 12 | desc: Install required Homebrew apps for local development with Vagrant and Virtualbox 13 | cmds: 14 | - brew tap esolitos/ipa 15 | - brew install pre-commit yamllint jq yq shellcheck sshpass coreutils findutils 16 | - brew cask install virtualbox vagrant 17 | -------------------------------------------------------------------------------- /.taskfiles/Taskfile_linux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | tasks: 5 | 6 | install: 7 | desc: Install required apt-get apps 8 | cmds: 9 | - sudo apt-get install -y ansible 10 | - sudo apt-get update 11 | - sudo apt-get install -y apt-transport-https gnupg2 12 | - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 13 | - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list 14 | - sudo apt-get update 15 | - sudo apt-get install -y kubectl 16 | - curl https://baltocdn.com/helm/signing.asc | sudo apt-key add - 17 | - echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 18 | - sudo apt-get update 19 | - sudo apt-get install -y helm 20 | 21 | install:dev: 22 | desc: Install required apt-get apps for local development with Vagrant and Virtualbox 23 | cmds: 24 | - sudo apt-get install -y virtualbox-6.1 25 | - sudo apt-get install -y vagrant 26 | - sudo apt-get install -y yamllint jq shellcheck sshpass 27 | - curl -L -o vagrant.deb https://releases.hashicorp.com/vagrant/2.2.10/vagrant_2.2.10_x86_64.deb 28 | - sudo dpkg -i vagrant.deb 29 | - rm vagrant.deb 30 | - curl -sL -o /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 31 | - chmod +x /usr/local/bin/yq 32 | - pip install pre-commit 33 | -------------------------------------------------------------------------------- /.taskfiles/Taskfile_vagrant.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | vars: 5 | VMS: "k8s-node-10 k8s-node-11 k8s-node-12" 6 | 7 | tasks: 8 | 9 | up: 10 | desc: Create VMs 11 | cmds: 12 | - | 13 | vagrant status --machine-readable --no-color | grep metadata | cut -d, -f2 | {{if eq OS "linux"}}xargs{{else}}gxargs{{end}} --max-procs=4 -I {} vagrant up --provider virtualbox {} 14 | 15 | snapshot: 16 | desc: Snapshot VMs 17 | cmds: 18 | - | 19 | for vm in {{.VMS}}; do 20 | VBoxManage snapshot "${vm}" take "baseline" --live 21 | done 22 | 23 | build: 24 | desc: Create and take a snapshot of VMs 25 | cmds: 26 | - task: up 27 | - task: snapshot 28 | 29 | rebuild: 30 | desc: Rebuild 31 | cmds: 32 | - task: destroy 33 | - task: build 34 | 35 | destroy: 36 | desc: Destroy 37 | cmds: 38 | - | 39 | vboxmanage closemedium $(realpath k8s-node-11-block.vdi) --delete > /dev/null 2>&1 || true 40 | vboxmanage closemedium $(realpath k8s-node-12-block.vdi) --delete > /dev/null 2>&1 || true 41 | for vm in {{.VMS}}; do 42 | VBoxManage controlvm 
"${vm}" poweroff > /dev/null 2>&1 43 | VBoxManage unregistervm "${vm}" --delete > /dev/null 2>&1 44 | done 45 | rm -rf *.vdi > /dev/null 2>&1 || true 46 | 47 | start: 48 | desc: Start 49 | cmds: 50 | - | 51 | for vm in {{.VMS}}; do 52 | VBoxManage startvm "${vm}" --type headless 53 | done 54 | 55 | stop: 56 | desc: Stop 57 | cmds: 58 | - | 59 | for vm in {{.VMS}}; do 60 | VBoxManage controlvm "${vm}" poweroff 61 | done 62 | 63 | restart: 64 | desc: restart 65 | cmds: 66 | - task: stop 67 | - task: start 68 | 69 | restore: 70 | desc: Restore 71 | cmds: 72 | - | 73 | for vm in {{.VMS}}; do 74 | VBoxManage controlvm "${vm}" poweroff 75 | VBoxManage snapshot "${vm}" restore "baseline" 76 | VBoxManage startvm "${vm}" --type headless 77 | done 78 | 79 | box:update: 80 | desc: Update the Vagrant boxes 81 | cmds: 82 | - vagrant box update 83 | 84 | box:prune: 85 | desc: Prune inactive boxes 86 | cmds: 87 | - vagrant box prune --keep-active-boxes 88 | -------------------------------------------------------------------------------- /.vagrant_ssh_key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onedr0p/k3s-homeops-ansible/f51911ecdbc9bd583111510fc6e22fdf780dcc9d/.vagrant_ssh_key -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k3s-homeops-ansible 2 | 3 | This is an opinionated way to provision Ubuntu 20.04 and install k3s on top. 4 | 5 | ## Prerequisites 6 | 7 | There's a couple things that will need to be done before you get starting running with Ansible. 8 | 9 | 1) Install Ansible >= 2.10.0 on your local machine 10 | 2) Install a supported OS on each of your nodes 11 | 3) Set static IP for each node on the OS or in your router (you can use the IP assigned via DHCP, but it's not recommended) 12 | 4) Copy your local public ssh key with `ssh-copy-id` to each node 13 | 5) _Optional_ review playbooks and roles to understand what these Ansible script will do 14 | 15 | After that we're ready to continue with Ansible... 16 | 17 | ## Ansible 18 | 19 | Get started by cloning this repository and copying the hosts and config into a new directory. 20 | 21 | ```bash 22 | # clone this repo 23 | git clone https://github.com/onedr0p/k3s-homeops-ansible 24 | # change into the directory 25 | cd k3s-homeops-ansible 26 | # copy the hosts and config to a new folder 27 | cp -r ./inventory/local ./inventory/custom 28 | ``` 29 | 30 | ### Update the Ansible config files 31 | 32 | **Note:** This project uses [PyratLabs/ansible-role-k3s](https://github.com/PyratLabs/ansible-role-k3s) for installing k3s. Configuration options can be viewed in their README. 33 | 34 | After you have copied over the configuration files you will need to update the configuration in the files: 35 | 36 | - `./inventory/custom/hosts.yml`: Host definitions 37 | - `./inventory/custom/host_vars/*.yml`: Host IP and host level variables 38 | - `./inventory/custom/group_vars/*.yml`: Global variables for all hosts 39 | 40 | Each file it carefully documented. 
41 | 42 | ### Get Ansible dependencies 43 | 44 | ```bash 45 | ansible-galaxy install -r requirements.yml 46 | ``` 47 | 48 | ### Run the playbooks 49 | 50 | ```bash 51 | # This playbook will prepare your nodes for Kubernetes 52 | ansible-playbook -i ./inventory/custom/hosts.yml ./playbooks/os-prepare.yml 53 | # This playbook will install k3s 54 | ansible-playbook -i ./inventory/custom/hosts.yml ./playbooks/k3s-build.yml 55 | ``` 56 | 57 | ### Verify the cluster is up and running 58 | 59 | ```bash 60 | kubectl --kubeconfig ./kubeconfig get nodes -o wide 61 | ``` 62 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | includes: 5 | deps: .taskfiles/Taskfile_{{OS}}.yml 6 | vagrant: .taskfiles/Taskfile_vagrant.yml 7 | ansible: .taskfiles/Taskfile_ansible.yml 8 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | 2 | # -*- mode: ruby -*- 3 | # vi: set ft=ruby : 4 | 5 | # Specify minimum Vagrant version and Vagrant API version 6 | Vagrant.require_version ">= 1.6.0" 7 | VAGRANTFILE_API_VERSION = "2" 8 | 9 | # Set cpus to half number of host cpus 10 | cpus = case RbConfig::CONFIG["host_os"] 11 | when /darwin/ then `sysctl -n hw.ncpu`.to_i / 2 12 | when /linux/ then `nproc`.to_i / 2 13 | else 2 14 | end 15 | 16 | NODES_NUM = 3 17 | IP_BASE_PRIVATE = "172.16.20." 18 | 19 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 20 | config.ssh.insert_key = false 21 | 22 | (1..NODES_NUM).each do |i| 23 | config.vm.define "k8s-node-#{i + 9}" do |config| 24 | 25 | hostname = "k8s-node-#{i + 9}" 26 | 27 | memory = case hostname 28 | when "k8s-node-10" then 2048 29 | else 1024 30 | end 31 | 32 | config.vm.box = "bento/ubuntu-20.04" 33 | config.vm.network "private_network", ip: "#{IP_BASE_PRIVATE}#{i + 9}" 34 | config.vm.hostname = hostname 35 | config.vm.provider "virtualbox" 36 | config.vm.provider :virtualbox do |v| 37 | v.linked_clone = true 38 | v.gui = false 39 | v.customize [ 40 | 'modifyvm', :id, 41 | "--cpus", cpus, 42 | "--memory", memory, 43 | "--name", hostname, 44 | "--ioapic", "on", 45 | '--audio', 'none', 46 | "--uartmode1", "file", File::NULL, 47 | ] 48 | 49 | # Create a block device for Longhorn on the worker nodes 50 | if hostname != "k8s-node-10" 51 | disk = "./"+hostname+"-block.vdi" 52 | unless File.exist?(disk) 53 | v.customize [ 54 | "createhd", 55 | "--filename", disk, 56 | "--variant", "Fixed", 57 | "--size", 1024*5 58 | ] 59 | end 60 | v.customize [ 61 | "storageattach", :id, 62 | "--storagectl", "SATA Controller", 63 | "--port", 2, 64 | "--device", 0, 65 | "--type", "hdd", 66 | "--medium", disk 67 | ] 68 | end 69 | end 70 | end 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | #--- General settings 4 | nocows = True 5 | forks = 8 6 | module_name = command 7 | deprecation_warnings = True 8 | executable = /bin/bash 9 | # ansible_managed = This file is managed by Ansible 10 | 11 | #--- Files/Directory settings 12 | log_path = ~/ansible.log 13 | inventory = ./inventory 14 | library = /usr/share/my_modules 15 | remote_tmp = ~/.ansible/tmp 16 | local_tmp = ~/.ansible/tmp 17 | roles_path = ./roles 18 | retry_files_enabled = False 19 | 20 |
#--- Fact Caching settings 21 | fact_caching = jsonfile 22 | fact_caching_connection = ~/.ansible/facts_cache 23 | fact_caching_timeout = 7200 24 | 25 | #--- SSH settings 26 | remote_port = 22 27 | timeout = 30 28 | host_key_checking = False 29 | ssh_executable = /usr/bin/ssh 30 | private_key_file = ~/.ssh/id_rsa 31 | 32 | force_valid_group_names = ignore 33 | 34 | #--- Speed 35 | callback_whitelist = ansible.posix.profile_tasks 36 | internal_poll_interval = 0.001 37 | 38 | [inventory] 39 | unparsed_is_failed = true 40 | # enable_plugins = virtualbox 41 | 42 | [privilege_escalation] 43 | become = True 44 | become_method = sudo 45 | become_user = root 46 | become_ask_pass = False 47 | 48 | [ssh_connection] 49 | scp_if_ssh = smart 50 | transfer_method = smart 51 | retries = 3 52 | timeout = 10 53 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s 54 | pipelining = True 55 | control_path = %(directory)s/%%h-%%r -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/ansible-settings.yml: -------------------------------------------------------------------------------- 1 | # Ansible user to ssh into servers with 2 | ansible_user: "ubuntu" 3 | ansible_ssh_pass: "ubuntu" 4 | ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null" 5 | ansible_become_pass: "ubuntu" 6 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/cni-settings.yml: -------------------------------------------------------------------------------- 1 | # Use the Calico CNI driver instead of Flannel 2 | # ...adjust 'k3s_flannel_backend' and 'k3s_no_flannel' if you want to use flannel 3 | calico: 4 | enabled: false 5 | operator_manifest: "https://docs.projectcalico.org/manifests/tigera-operator.yaml" 6 | # Enabling BGP requires your router to be set up to handle it 7 | bgp: 8 | enabled: false 9 | # peer is usually your router e.g. 192.168.1.1 10 | peer: 192.168.42.1 11 | as: 64512 12 | # externalIPs is the network you want services to consume 13 | # this network should not exist or be defined anywhere in your network 14 | # e.g. 192.168.169.0/24 15 | externalIPs: 192.168.169.0/24 16 | # Enable the eBPF dataplane - disables kube-proxy 17 | # https://docs.projectcalico.org/maintenance/enabling-bpf 18 | ebpf: 19 | enabled: false 20 | 21 | # Use the cilium CNI driver w/ kube-router - disables kube-proxy 22 | cilium_kube_router: 23 | enabled: true 24 | cilium: 25 | registry: docker.io/cilium 26 | version: 1.9.0-rc0 27 | # Specify the CIDR for native routing 28 | # usually the network CIDR that your master and worker nodes are on 29 | # https://docs.cilium.io/en/v1.8/concepts/networking/routing/ 30 | nativeRoutingCIDR: 192.168.42.0/24 31 | # Use the embedded etcd from k3s for the kvstore 32 | # https://docs.cilium.io/en/v1.8/gettingstarted/k8s-install-external-etcd 33 | etcd: true 34 | # BGP settings for kube-router 35 | kube_router: 36 | registry: docker.io/cloudnativelabs/kube-router 37 | version: 1.1.0 38 | # peerRouterIP is usually your router e.g.
192.168.1.1 39 | peerRouterIP: 192.168.42.1 40 | peerRouterASNS: 64512 41 | clusterASN: 64512 42 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/k3s-settings.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Below vars are for the xanmanning.k3s role 3 | # ...see https://github.com/PyratLabs/ansible-role-k3s#group-variables 4 | # 5 | 6 | # Use a specific version of k3s 7 | # k3s_release_version: "v1.19" 8 | # k3s_release_version: "48ed47c4a3e420fa71c18b2ec97f13dc0659778b" 9 | # k3s_release_version: "c3c983198f4c8c7480c486032ac7a70b267f69da" 10 | k3s_release_version: "43cb200fd0540b416ddec1216e371d41be2dadf2" 11 | 12 | # Install using hard links rather than symbolic links. 13 | # ...if you are using the system-upgrade-controller you will need to use hard links, as the controller cannot follow symbolic links. 14 | k3s_install_hard_links: true 15 | 16 | # Do not deploy the following 17 | k3s_no_traefik: true 18 | k3s_no_servicelb: true 19 | k3s_no_metrics_server: true 20 | k3s_no_flannel: true 21 | k3s_no_local_storage: true 22 | k3s_flannel_backend: "none" 23 | 24 | # k3s_default_local_storage_path: "/dev/shm/k8s" 25 | 26 | # Use a specific control node address. IP or FQDN. 27 | # ...k8s-master-vip is defined in /etc/hosts of all nodes if you are using keepalived 28 | k3s_control_node_address: "{{ 'k8s-master-vip' if keepalived.enabled is defined and keepalived.enabled else groups['master'][0] }}" 29 | 30 | k3s_become_for_all: true 31 | 32 | # Network CIDR to use for pod IPs 33 | # k3s_cluster_cidr: "10.90.0.0/16" 34 | k3s_cluster_cidr: "10.69.0.0/16" 35 | 36 | # Network CIDR to use for service IPs 37 | # k3s_service_cidr: "10.96.0.0/16" 38 | k3s_service_cidr: "10.96.0.0/16" 39 | 40 | # Disable kube-proxy 41 | k3s_disable_kube_proxy: "{{ true if ((cilium_kube_router.enabled is defined and cilium_kube_router.enabled) or (calico.ebpf.enabled is defined and calico.ebpf.enabled)) else false }}" 42 | 43 | # Disable k3s default cloud controller manager. 44 | k3s_disable_cloud_controller: true 45 | 46 | # Disable k3s default network policy controller. 47 | # kube-router has its own network-policy controller (or so it seems) 48 | # https://github.com/rancher/k3s/issues/1308 49 | # k3s_disable_network_policy: "{{ true if cilium_kube_router.enabled is defined and cilium_kube_router.enabled else false }}" 50 | k3s_disable_network_policy: false 51 | 52 | # Use experimental features (spooky!)
53 | k3s_use_experimental: true 54 | 55 | # Enable any feature-gates 56 | k3s_kubelet_args: 57 | - feature-gates: ExternalPolicyForExternalIP=true 58 | 59 | # Enable debugging 60 | k3s_debug: true 61 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/keepalived-settings.yml: -------------------------------------------------------------------------------- 1 | # Install keepalived and set a VIP 2 | # ...only enabled if you are using HA masters 3 | keepalived: 4 | enabled: true 5 | vip: "192.168.42.99" 6 | interface: "{{ ansible_default_ipv4['interface'] }}" 7 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/meta-settings.yml: -------------------------------------------------------------------------------- 1 | # Apply system-upgrade label, for use with 2 | # https://rancher.com/docs/k3s/latest/en/upgrades/automated/ 3 | k3s_system_upgrade: true 4 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/os-settings.yml: -------------------------------------------------------------------------------- 1 | # Timezone for the servers 2 | timezone: "America/New_York" 3 | 4 | # Set custom ntp servers 5 | ntp_servers: 6 | primary: 7 | - "time.cloudflare.com" 8 | - "time.google.com" 9 | fallback: 10 | - "0.us.pool.ntp.org" 11 | - "1.us.pool.ntp.org" 12 | - "2.us.pool.ntp.org" 13 | - "3.us.pool.ntp.org" 14 | 15 | # Additional ssh public keys to add to the nodes 16 | ssh_authorized_keys: 17 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCzsu/AuBZkK5J9hZLya9ood8RuXqnN0Eo4hEeFvibuhQKyUerwLG1COa8d1QDXSJR37c/JjXKRzYX5JE347P48hU7dN+vnoi9uKuhHu7NILaLeo2QrQKTuWKygj23ee4EHj0B/XP562seqv9/zLmbaTZO3J1lXFugTVo0kXGbzCWQnOZyEEY+cUHhHVSu4/ugmkkUvV744CYo7TmcMwBjfrOnvamWVD09dYcX6wq0KLDU59BzVj6GNs8TbwUNR52EAYv0PXDPeKUkrMx5YaEGoc286kFWGiIJYSKMOukuZYPOSkgwONQSGt6Hvzft+eCxvR93J1vNcMIV1PjTfRdZvPa1mvJ1/uzWWWGQfODGKmBaDMZ4e2o4ItLJcopAUWbpr/bQIadCpt4ckKPJJ9km+qhYz+Np0ZfQ1Ay5NOTLZx3IhBuXHIxLIr0PialTig9+MXf1ZCwW2SI7kPrVVB5IwysA8R9Z/p7cnv0uOVNNfFMyIsI8jVnsqeRN4GFMweBY1kJwK4X7dnsKfoYWkjQ9DUaZ62wTk0u4P4zSGxEVx43rk/f2b+nXZbl1IhgA/2SV2b2eHIGHE6aSMSjUHXrZ7ZVJhAwqDFoN18NvBKOHQo6CKHG5jyvII4I8+NbnAGPG320VbvQoAA8nlUO8rsy05KdBU0U9UI3gq7bYWFJaKew== devin.kray@gmail.com" 18 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDO1R1i7dP0dY3f3rnlIQjVPsg3p0Xvz/16oZq+cUmE+O3An9qKQNUwWA98EdnMfq09qLpU1OBSafrLjZ8rd4+4KZJNhFgOCX1qq9GDo+o87toYZUvZ6ftdokpgYlYh0sSUxCjbEnaYAr9W2ZYs32fSJAZdoSydUp9gsTlYQJtq4OYKeQd4gd0oJ2F+L3Rv/HwY1U2+ynW4XnsnbfYky9DkQJIgirFwhwsuDot7VMW14mvI1T4IN+rGHfvHsEqsKrVCOUwfsgnNEXR4R5lSE0mNgZiEFnh/F7pA8QGmcKrNv/Fd4cZRdWxtDdrmEVVtbVnlnRrtV0NS9VqJiJm6Rm9QQ9LLemHAGJHT90hUknSB5qw1pxJ/uZ5hCQzOAzfABQ+Jgj4VzT4PpwaGF529v9VSsV6KPPyF0pP8YNKgRaSBxFqFTwIBLH9eBiE0neIHDGBBi3H9pbYtlMHoy9pnepSPPhD4vSiVbFVSFxwKJ9hoVpRKt3gZuhtIMuDVFpgiAHYE9VYSKbfH0FuBLqrNzF0inkn0Luo+wWyCUNO2y+mZZKpm61huCgdWGL4HfSNLKqwxBtG0oKI1GISGzipay9m0u0JrPjeNr0bD0Sg28aDhjEeds6P9Xe/j/QZg6YWW11rUH33GFljHo6Llu9rBfoiM9BJqXDyInkzfYcq91cojmw== devin.kray@gmail.com" 19 | - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDBB8yES+Os3okY1TwOgfaRsIffAWwPxkgCO1BDfY7Ynno7YxmYgyRaxUnOrkuWfMwiWcYwLgk+yipBf5ctYRsEQkK4cVn1VYR3a0YeMa8tBDa36EyR+A0IcJ1GOOq24+l9N9Y/a/3kz8lC7eIope67WOIikccRrbiP89nP1FicAsDMpPLfcXFKb0G4MQHrY1w8zcd9lJFQkqwBCFaoZKW2tCaE21zYuqhv3gLAUcSiUF6FNZSnwvpFwC1s9jFqvrrOiLsXD61hHqO/Muo6xiPaSsjHBy72kfvHM4vKK8au99TX4sMwZ+f1QJqyvzWqG52HfMNLD14Wf3dChgZRkBUGu8MY+jGpX23VmcmypPFBwTB1+jm4n5XlgpeuqG4BumS5gdrRsHNX0tfRRftzaSnLeAEOGQz6SCXor0m/FOXdYBDiVihdsCrKwFZK6AmPF5XwMnCMJIC3RCIKgRxYW/Pxd3fLlD0WlM10MYdJLxK0wBiLbugFS5iRBHLwn0MgJxEkRY7+j46/Ds5RRiFrYOqFG0A7GQj/mZKxSVeYuHUAnRIfaWlZ71TGqf3RRCsPbrTkeFCPToqPLxOEpj+IXMAB3adI+R6OmtRoZV4HaiWDfHDxNj0lsS2w2AyhrWPeyAz84yRmhcB5tA7hi/KYCNT/c7Wt7EdcjlGA5Z20urlfhw== devin.kray@gmail.com" 20 | 21 | # Disable ipv6 22 | disable_ipv6: true -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/registry-settings.yml: -------------------------------------------------------------------------------- 1 | # Configure a pull thru cache using an existing registry 2 | # ...requires a registry cache already set up 3 | registry: 4 | cache: 5 | enabled: true 6 | address: "http://192.168.1.39:8082" 7 | custom: 8 | enabled: false 9 | address: "http://x.x.x.x:5000" 10 | username: "username" 11 | password: "password" -------------------------------------------------------------------------------- /inventory/homelab/group_vars/all/rsyslog-settings.yml: -------------------------------------------------------------------------------- 1 | # Enable rsyslog 2 | # ...requires a rsyslog server already set up 3 | rsyslog: 4 | enabled: false 5 | ip: x.x.x.x 6 | port: 1514 -------------------------------------------------------------------------------- /inventory/homelab/group_vars/master/k3s-settings.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Below vars are for the xanmanning.k3s role 3 | # ...see https://github.com/PyratLabs/ansible-role-k3s#group-variables 4 | # 5 | 6 | # Define the host as a control plane node 7 | k3s_control_node: true 8 | 9 | # Add additional hosname or IP as Subject Alternate Name in the TLS cert 10 | k3s_tls_san: 11 | - "{{ keepalived.vip }}" 12 | - "k8s-master-vip" 13 | 14 | # Use embedded etcd 15 | k3s_etcd_datastore: true 16 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/master/meta-settings.yml: -------------------------------------------------------------------------------- 1 | k3s_taint_master: false 2 | -------------------------------------------------------------------------------- /inventory/homelab/group_vars/worker/k3s-settings.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Below vars are for the xanmanning.k3s role 3 | # ...see https://github.com/PyratLabs/ansible-role-k3s#group-variables 4 | # 5 | 6 | # Define the host as a control plane node 7 | k3s_control_node: false 8 | -------------------------------------------------------------------------------- /inventory/homelab/host_vars/k8s-master-a.yml: -------------------------------------------------------------------------------- 1 | # IP address of node 2 | ansible_host: "192.168.42.100" 3 | 4 | # IP Address to advertise for this node. 
5 | # for more k3s host variables, see 6 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 7 | k3s_node_ip_address: "{{ ansible_host }}" 8 | 9 | # Format and mount a device for longhorn 10 | longhorn: 11 | enabled: false 12 | disks: 13 | - "/dev/sdb" 14 | filesystem: "ext4" 15 | mountpoint: "/var/lib/longhorn" 16 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 17 | # lvcreate_opts: "" 18 | # Defines additional mount options (e.g. noatime, noexec, etc) 19 | # mount_opts: "" 20 | 21 | # Set enabled to true to mark this host as running a distributed storage rook-ceph 22 | rook_ceph: 23 | enabled: false 24 | devices: 25 | - /dev/nvme0n1 26 | -------------------------------------------------------------------------------- /inventory/homelab/host_vars/k8s-master-b.yml: -------------------------------------------------------------------------------- 1 | # IP address of node 2 | ansible_host: "192.168.42.101" 3 | 4 | # IP Address to advertise for this node. 5 | # for more k3s host variables, see 6 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 7 | k3s_node_ip_address: "{{ ansible_host }}" 8 | 9 | # Format and mount a device for longhorn 10 | longhorn: 11 | enabled: false 12 | disks: 13 | - "/dev/sdb" 14 | filesystem: "ext4" 15 | mountpoint: "/var/lib/longhorn" 16 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 17 | # lvcreate_opts: "" 18 | # Defines additional mount options (e.g. noatime, noexec, etc) 19 | # mount_opts: "" 20 | 21 | # Set enabled to true to mark this host as running a distributed storage rook-ceph 22 | rook_ceph: 23 | enabled: false 24 | devices: 25 | - /dev/nvme0n1 26 | -------------------------------------------------------------------------------- /inventory/homelab/host_vars/k8s-master-c.yml: -------------------------------------------------------------------------------- 1 | # IP address of node 2 | ansible_host: "192.168.42.102" 3 | 4 | # IP Address to advertise for this node. 5 | # for more k3s host variables, see 6 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 7 | k3s_node_ip_address: "{{ ansible_host }}" 8 | 9 | # Format and mount a device for longhorn 10 | longhorn: 11 | enabled: false 12 | disks: 13 | - "/dev/sdb" 14 | filesystem: "ext4" 15 | mountpoint: "/var/lib/longhorn" 16 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 17 | # lvcreate_opts: "" 18 | # Defines additional mount options (e.g. noatime, noexec, etc) 19 | # mount_opts: "" 20 | 21 | # Set enabled to true to mark this host as running a distributed storage rook-ceph 22 | rook_ceph: 23 | enabled: false 24 | devices: 25 | - /dev/nvme0n1 26 | -------------------------------------------------------------------------------- /inventory/homelab/host_vars/k8s-worker-a.yml: -------------------------------------------------------------------------------- 1 | # IP address of node 2 | ansible_host: "192.168.42.120" 3 | 4 | # IP Address to advertise for this node. 5 | # for more k3s host variables, see 6 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 7 | k3s_node_ip_address: "{{ ansible_host }}" 8 | 9 | # Format and mount a device for longhorn 10 | longhorn: 11 | enabled: false 12 | disks: 13 | - "/dev/sdb" 14 | filesystem: "ext4" 15 | mountpoint: "/var/lib/longhorn" 16 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 17 | # lvcreate_opts: "" 18 | # Defines additional mount options (e.g. 
noatime, noexec, etc) 19 | # mount_opts: "" 20 | 21 | # Set enabled to true to mark this host as running a distributed storage rook-ceph 22 | rook_ceph: 23 | enabled: true 24 | devices: 25 | - /dev/nvme0n1 26 | -------------------------------------------------------------------------------- /inventory/homelab/host_vars/k8s-worker-f.yml: -------------------------------------------------------------------------------- 1 | # IP address of node 2 | ansible_host: "192.168.42.125" 3 | 4 | # IP Address to advertise for this node. 5 | # for more k3s host variables, see 6 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 7 | k3s_node_ip_address: "{{ ansible_host }}" 8 | 9 | # Format and mount a device for longhorn 10 | longhorn: 11 | enabled: false 12 | disks: 13 | - "/dev/sdb" 14 | filesystem: "ext4" 15 | mountpoint: "/var/lib/longhorn" 16 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 17 | # lvcreate_opts: "" 18 | # Defines additional mount options (e.g. noatime, noexec, etc) 19 | # mount_opts: "" 20 | 21 | # Set enabled to true to mark this host as running a distributed storage rook-ceph 22 | rook_ceph: 23 | enabled: false 24 | devices: 25 | - /dev/nvme0n1 26 | -------------------------------------------------------------------------------- /inventory/homelab/hosts.yml: -------------------------------------------------------------------------------- 1 | all: 2 | children: 3 | # Master group, do not change the 'master' name 4 | # hosts should match the filenames in 'host_vars' 5 | master: 6 | hosts: 7 | k8s-master-a: 8 | k8s-master-b: 9 | k8s-master-c: 10 | # Worker group, do not change the 'worker' name 11 | # hosts should match the filenames in 'host_vars' 12 | worker: 13 | hosts: 14 | k8s-worker-a: 15 | k8s-worker-f: 16 | -------------------------------------------------------------------------------- /inventory/vagrant/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # 4 | # Vars for this role 5 | # 6 | 7 | # Ansible user to ssh into servers with 8 | ansible_user: "vagrant" 9 | ansible_ssh_pass: "vagrant" 10 | ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null" 11 | ansible_become_pass: "vagrant" 12 | 13 | # Timezone for the servers 14 | timezone: "America/New_York" 15 | 16 | # Set custom ntp servers 17 | ntp_servers: 18 | primary: 19 | - "time.cloudflare.com" 20 | - "time.google.com" 21 | fallback: 22 | - "0.us.pool.ntp.org" 23 | - "1.us.pool.ntp.org" 24 | - "2.us.pool.ntp.org" 25 | - "3.us.pool.ntp.org" 26 | 27 | # Enable coping kubeconfig to ~/.kube 28 | # ...otherwise a kubeconfig will be left in the root of the repo 29 | kubeconfig: false 30 | 31 | # Additional ssh public keys to add to the nodes 32 | ssh_authorized_keys: 33 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCzsu/AuBZkK5J9hZLya9ood8RuXqnN0Eo4hEeFvibuhQKyUerwLG1COa8d1QDXSJR37c/JjXKRzYX5JE347P48hU7dN+vnoi9uKuhHu7NILaLeo2QrQKTuWKygj23ee4EHj0B/XP562seqv9/zLmbaTZO3J1lXFugTVo0kXGbzCWQnOZyEEY+cUHhHVSu4/ugmkkUvV744CYo7TmcMwBjfrOnvamWVD09dYcX6wq0KLDU59BzVj6GNs8TbwUNR52EAYv0PXDPeKUkrMx5YaEGoc286kFWGiIJYSKMOukuZYPOSkgwONQSGt6Hvzft+eCxvR93J1vNcMIV1PjTfRdZvPa1mvJ1/uzWWWGQfODGKmBaDMZ4e2o4ItLJcopAUWbpr/bQIadCpt4ckKPJJ9km+qhYz+Np0ZfQ1Ay5NOTLZx3IhBuXHIxLIr0PialTig9+MXf1ZCwW2SI7kPrVVB5IwysA8R9Z/p7cnv0uOVNNfFMyIsI8jVnsqeRN4GFMweBY1kJwK4X7dnsKfoYWkjQ9DUaZ62wTk0u4P4zSGxEVx43rk/f2b+nXZbl1IhgA/2SV2b2eHIGHE6aSMSjUHXrZ7ZVJhAwqDFoN18NvBKOHQo6CKHG5jyvII4I8+NbnAGPG320VbvQoAA8nlUO8rsy05KdBU0U9UI3gq7bYWFJaKew== devin.kray@gmail.com" 34 | - 
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDO1R1i7dP0dY3f3rnlIQjVPsg3p0Xvz/16oZq+cUmE+O3An9qKQNUwWA98EdnMfq09qLpU1OBSafrLjZ8rd4+4KZJNhFgOCX1qq9GDo+o87toYZUvZ6ftdokpgYlYh0sSUxCjbEnaYAr9W2ZYs32fSJAZdoSydUp9gsTlYQJtq4OYKeQd4gd0oJ2F+L3Rv/HwY1U2+ynW4XnsnbfYky9DkQJIgirFwhwsuDot7VMW14mvI1T4IN+rGHfvHsEqsKrVCOUwfsgnNEXR4R5lSE0mNgZiEFnh/F7pA8QGmcKrNv/Fd4cZRdWxtDdrmEVVtbVnlnRrtV0NS9VqJiJm6Rm9QQ9LLemHAGJHT90hUknSB5qw1pxJ/uZ5hCQzOAzfABQ+Jgj4VzT4PpwaGF529v9VSsV6KPPyF0pP8YNKgRaSBxFqFTwIBLH9eBiE0neIHDGBBi3H9pbYtlMHoy9pnepSPPhD4vSiVbFVSFxwKJ9hoVpRKt3gZuhtIMuDVFpgiAHYE9VYSKbfH0FuBLqrNzF0inkn0Luo+wWyCUNO2y+mZZKpm61huCgdWGL4HfSNLKqwxBtG0oKI1GISGzipay9m0u0JrPjeNr0bD0Sg28aDhjEeds6P9Xe/j/QZg6YWW11rUH33GFljHo6Llu9rBfoiM9BJqXDyInkzfYcq91cojmw== devin.kray@gmail.com" 35 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBB8yES+Os3okY1TwOgfaRsIffAWwPxkgCO1BDfY7Ynno7YxmYgyRaxUnOrkuWfMwiWcYwLgk+yipBf5ctYRsEQkK4cVn1VYR3a0YeMa8tBDa36EyR+A0IcJ1GOOq24+l9N9Y/a/3kz8lC7eIope67WOIikccRrbiP89nP1FicAsDMpPLfcXFKb0G4MQHrY1w8zcd9lJFQkqwBCFaoZKW2tCaE21zYuqhv3gLAUcSiUF6FNZSnwvpFwC1s9jFqvrrOiLsXD61hHqO/Muo6xiPaSsjHBy72kfvHM4vKK8au99TX4sMwZ+f1QJqyvzWqG52HfMNLD14Wf3dChgZRkBUGu8MY+jGpX23VmcmypPFBwTB1+jm4n5XlgpeuqG4BumS5gdrRsHNX0tfRRftzaSnLeAEOGQz6SCXor0m/FOXdYBDiVihdsCrKwFZK6AmPF5XwMnCMJIC3RCIKgRxYW/Pxd3fLlD0WlM10MYdJLxK0wBiLbugFS5iRBHLwn0MgJxEkRY7+j46/Ds5RRiFrYOqFG0A7GQj/mZKxSVeYuHUAnRIfaWlZ71TGqf3RRCsPbrTkeFCPToqPLxOEpj+IXMAB3adI+R6OmtRoZV4HaiWDfHDxNj0lsS2w2AyhrWPeyAz84yRmhcB5tA7hi/KYCNT/c7Wt7EdcjlGA5Z20urlfhw== devin.kray@gmail.com" 36 | 37 | # Enable rsyslog 38 | # ...requires a rsyslog server already set up 39 | remote_syslog: 40 | enabled: false 41 | ip: x.x.x.x 42 | port: 1514 43 | 44 | # Configure a pull thru cache using an existing registry 45 | # ...requires a registry cache already set up 46 | registry_cache: 47 | enabled: false 48 | address: "http://x.x.x.x:5000" 49 | 50 | # # @TODO Allow a custom registry 51 | # registry_custom: 52 | # - name: "" 53 | # enabled: false 54 | # address: "http://x.x.x.x:5000" 55 | # username: "" 56 | # password: "" 57 | # # - name: "" 58 | # # enabled: false 59 | # # address: "http://x.x.x.x:5000" 60 | # # username: "" 61 | # # password: "" 62 | 63 | # Install keepalived and set a VIP 64 | # ...only enabled if you are using HA masters 65 | keepalived: 66 | enabled: false 67 | vip: "x.x.x.x" 68 | interface: "{{ ansible_default_ipv4['interface'] }}" 69 | 70 | # Use the Calico CNI driver instead of flannel 71 | # ...adjust 'k3s_flannel_backend' and 'k3s_no_flannel' if you want to use flannel 72 | calico: 73 | enabled: true 74 | operator_manifest: "https://docs.projectcalico.org/manifests/tigera-operator.yaml" 75 | # https://docs.projectcalico.org/networking/mtu#operator 76 | mtu: 1500 77 | bgp: 78 | # enabling bgp requires your router set up to handle it 79 | enabled: false 80 | # peer is usually your router e.g. 192.168.1.1 81 | peer: x.x.x.1 82 | as: 64512 83 | # externalIPs is the network you want services to consume 84 | # this network should not exist or be defined anywhere in your network 85 | # e.g. 
192.168.69.0/24 86 | externalIPs: x.x.x.0/24 87 | 88 | # Use the cilium CNI driver instead of flannel 89 | # ...adjust 'k3s_flannel_backend' and 'k3s_no_flannel' if you want to use flannel 90 | cilium: 91 | enabled: false 92 | version: 1.8.3 93 | peerRouterIP: 192.168.42.1 94 | peerRouterASNS: 64512 95 | clusterASN: 64512 96 | 97 | # Apply CRDs to the cluster 98 | crds: 99 | - "https://raw.githubusercontent.com/prometheus-community/helm-charts/main/charts/kube-prometheus-stack/crds/crd-alertmanager.yaml" 100 | - "https://raw.githubusercontent.com/prometheus-community/helm-charts/main/charts/kube-prometheus-stack/crds/crd-podmonitor.yaml" 101 | - "https://raw.githubusercontent.com/prometheus-community/helm-charts/main/charts/kube-prometheus-stack/crds/crd-prometheus.yaml" 102 | - "https://raw.githubusercontent.com/prometheus-community/helm-charts/main/charts/kube-prometheus-stack/crds/crd-prometheusrules.yaml" 103 | - "https://raw.githubusercontent.com/prometheus-community/helm-charts/main/charts/kube-prometheus-stack/crds/crd-servicemonitor.yaml" 104 | - "https://raw.githubusercontent.com/prometheus-community/helm-charts/main/charts/kube-prometheus-stack/crds/crd-thanosrulers.yaml" 105 | - "https://raw.githubusercontent.com/fluxcd/helm-operator/v1.2.0/deploy/crds.yaml" 106 | - "https://github.com/jetstack/cert-manager/releases/download/v1.0.2/cert-manager.crds.yaml" 107 | 108 | # 109 | # 110 | # Vars for the xanmanning.k3s role 111 | # ...see https://github.com/PyratLabs/ansible-role-k3s#group-variables 112 | # 113 | # 114 | 115 | # Use a specific version of k3s 116 | k3s_release_version: "v1.19" 117 | 118 | # Install using hard links rather than symbolic links. 119 | # ... If you are using the system-upgrade-controller you will need to use hard links rather than symbolic links as the controller will not be able to follow symbolic links. 120 | k3s_install_hard_links: true 121 | 122 | # Do not deploy the following 123 | k3s_no_traefik: true 124 | k3s_no_servicelb: true 125 | k3s_no_metrics_server: true 126 | k3s_no_flannel: true 127 | k3s_no_local_storage: true 128 | k3s_flannel_backend: "none" 129 | 130 | # Use a specific control node address 131 | # ... 
set to "{{ keepalived.vip }}" if using keepalived 132 | k3s_control_node_address: "172.16.20.10" 133 | 134 | k3s_become_for_all: true 135 | 136 | # Network CIDR to use for pod IPs 137 | k3s_cluster_cidr: "10.42.0.0/16" 138 | 139 | # Network CIDR to use for service IPs 140 | k3s_service_cidr: "10.43.0.0/16" 141 | 142 | # Disable kube-proxy (only enable if you are using cilium) 143 | k3s_disable_kube_proxy: false 144 | 145 | # Enable any feature-gates 146 | k3s_kubelet_args: 147 | - feature-gates: ExternalPolicyForExternalIP=true 148 | 149 | # Enable debugging 150 | k3s_debug: true 151 | -------------------------------------------------------------------------------- /inventory/vagrant/host_vars/k8s-master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # IP address of node 4 | ansible_host: "172.16.20.10" 5 | 6 | # IP Address to advertise for this node 7 | # for more k3s host variables, see 8 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 9 | k3s_control_node: true 10 | 11 | # Add additional hosname or IP as Subject Alternate Name in the TLS cert 12 | k3s_tls_san: "{{ ansible_host }}" 13 | 14 | # IP Address to advertise for this node 15 | k3s_node_ip_address: "{{ ansible_host }}" 16 | 17 | # Apply system-upgrade label, for use with 18 | # https://rancher.com/docs/k3s/latest/en/upgrades/automated/ 19 | k3s_system_upgrade: true 20 | -------------------------------------------------------------------------------- /inventory/vagrant/host_vars/k8s-worker-a.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # IP address of node 4 | ansible_host: "172.16.20.11" 5 | 6 | # IP Address to advertise for this node 7 | # for more k3s host variables, see 8 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 9 | k3s_node_ip_address: "{{ ansible_host }}" 10 | 11 | # Format and mount a device for longhorn 12 | longhorn: 13 | enabled: true 14 | disks: 15 | - "/dev/sdb" 16 | filesystem: "ext4" 17 | mountpoint: "/var/lib/longhorn" 18 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 19 | # lvcreate_opts: "" 20 | # Defines additional mount options (e.g. noatime, noexec, etc) 21 | # mount_opts: "" 22 | 23 | # Apply system-upgrade label, for use with 24 | # https://rancher.com/docs/k3s/latest/en/upgrades/automated/ 25 | k3s_system_upgrade: true -------------------------------------------------------------------------------- /inventory/vagrant/host_vars/k8s-worker-b.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # IP address of node 4 | ansible_host: "172.16.20.12" 5 | 6 | # IP Address to advertise for this node 7 | # for more k3s host variables, see 8 | # https://github.com/PyratLabs/ansible-role-k3s#host-variables 9 | k3s_node_ip_address: "{{ ansible_host }}" 10 | 11 | # Format and mount a device for longhorn 12 | longhorn: 13 | enabled: true 14 | disks: 15 | - "/dev/sdb" 16 | filesystem: "ext4" 17 | mountpoint: "/var/lib/longhorn" 18 | # Defines additional lvcreate options (e.g. stripes, stripesize, etc) 19 | # lvcreate_opts: "" 20 | # Defines additional mount options (e.g. 
noatime, noexec, etc) 21 | # mount_opts: "" 22 | 23 | # Apply system-upgrade label, for use with 24 | # https://rancher.com/docs/k3s/latest/en/upgrades/automated/ 25 | k3s_system_upgrade: true -------------------------------------------------------------------------------- /inventory/vagrant/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | children: 5 | # Master group, do not change the 'master' name 6 | # hosts should match the filenames in 'host_vars' 7 | master: 8 | hosts: 9 | k8s-master: 10 | # Worker group, do not change the 'worker' name 11 | # hosts should match the filenames in 'host_vars' 12 | worker: 13 | hosts: 14 | k8s-worker-a: 15 | k8s-worker-b: 16 | -------------------------------------------------------------------------------- /playbooks/k3s-build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | vars_prompt: 8 | - name: "run_confirmation" 9 | prompt: "Are you sure you want to run this over the cluster? [Y/n]" 10 | default: "n" 11 | private: no 12 | any_errors_fatal: yes 13 | pre_tasks: 14 | - name: cluster | check confirmation 15 | fail: 16 | msg: "Abort." 17 | when: run_confirmation != "Y" 18 | roles: 19 | - xanmanning.k3s 20 | - k3s 21 | -------------------------------------------------------------------------------- /playbooks/k3s-nuke.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | vars_prompt: 8 | name: "run_confirmation" 9 | prompt: "This will DESTROY your cluster. Do you wish to continue? [Y/n]" 10 | default: "n" 11 | private: no 12 | any_errors_fatal: yes 13 | 14 | pre_tasks: 15 | - name: cluster-nuke | check confirmation 16 | fail: 17 | msg: "Abort." 18 | when: run_confirmation != "Y" 19 | 20 | tasks: 21 | - name: cluster-nuke | kill k3s 22 | command: /usr/local/bin/k3s-killall.sh 23 | 24 | - name: cluster-nuke | uninstall k3s 25 | command: 26 | cmd: /usr/local/bin/k3s-uninstall.sh 27 | removes: /usr/local/bin/k3s-uninstall.sh 28 | 29 | - name: cluster-nuke | uninstall k3s agent 30 | command: 31 | cmd: /usr/local/bin/k3s-agent-uninstall.sh 32 | removes: /usr/local/bin/k3s-agent-uninstall.sh 33 | 34 | - name: cluster-nuke | gather list of CNI files to delete 35 | find: 36 | paths: /etc/cni/net.d 37 | patterns: "*" 38 | register: files_to_delete 39 | 40 | - name: cluster-nuke | delete CNI files 41 | file: 42 | path: "{{ item.path }}" 43 | state: absent 44 | loop: "{{ files_to_delete.files }}" 45 | 46 | - name: cluster-nuke | reboot 47 | reboot: 48 | msg: "Nodes are being rebooted..." 49 | ignore_errors: true 50 | -------------------------------------------------------------------------------- /playbooks/k3s-upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | vars_prompt: 8 | - name: "run_confirmation" 9 | prompt: "Are you sure you want to run this over the cluster? [Y/n]" 10 | default: "n" 11 | private: no 12 | any_errors_fatal: yes 13 | pre_tasks: 14 | - name: cluster | check confirmation 15 | fail: 16 | msg: "Abort." 
17 | when: run_confirmation != "Y" 18 | roles: 19 | - xanmanning.k3s 20 | -------------------------------------------------------------------------------- /playbooks/os-longhorn.yml: -------------------------------------------------------------------------------- 1 | - hosts: 2 | - worker 3 | become: true 4 | gather_facts: true 5 | vars_prompt: 6 | name: "run_confirmation" 7 | prompt: "This will DESTROY your longhorn volumes. Do you wish to continue? [Y/n]" 8 | default: "n" 9 | private: no 10 | any_errors_fatal: yes 11 | pre_tasks: 12 | - name: check confirmation 13 | fail: 14 | msg: "Abort." 15 | when: 16 | - run_confirmation != "Y" 17 | 18 | - name: check longhorn is enabled 19 | fail: 20 | msg: "Abort." 21 | when: 22 | - longhorn.enabled is defined 23 | - not longhorn.enabled 24 | 25 | roles: 26 | - longhorn -------------------------------------------------------------------------------- /playbooks/os-prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | vars_prompt: 8 | - name: "restart_confirmation" 9 | prompt: "Restart nodes when complete? [Y/n]" 10 | default: "n" 11 | private: no 12 | roles: 13 | - os 14 | tasks: 15 | - name: os-prepare | reboot 16 | ignore_errors: true 17 | reboot: 18 | msg: "Nodes are being rebooted..." 19 | when: restart_confirmation == "Y" 20 | tags: 21 | - always 22 | -------------------------------------------------------------------------------- /playbooks/os-rsyslog.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | roles: 8 | - rsyslog 9 | -------------------------------------------------------------------------------- /playbooks/rook-ceph-nuke.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | vars_prompt: 8 | name: "nuke_confirmation" 9 | prompt: "This will DESTROY rook-ceph disks. Do you wish to continue? [Y/n]" 10 | default: "n" 11 | private: no 12 | any_errors_fatal: yes 13 | pre_tasks: 14 | - name: check confirmation 15 | fail: 16 | msg: "Abort." 
17 | when: nuke_confirmation != "Y" 18 | 19 | tasks: 20 | - name: rook-ceph | remove /var/lib/rook 21 | become: true 22 | file: 23 | state: absent 24 | path: "/var/lib/rook" 25 | when: 26 | - rook_ceph.enabled is defined 27 | - rook_ceph.enabled 28 | 29 | - name: rook-ceph | zap the drives 30 | become: true 31 | shell: > 32 | sgdisk --zap-all {{ item }} || true 33 | loop: 34 | "{{ rook_ceph.devices }}" 35 | when: 36 | - rook_ceph.enabled is defined 37 | - rook_ceph.enabled 38 | 39 | - name: rook-ceph | remove lvm partitions 40 | become: true 41 | shell: "{{ item }}" 42 | loop: 43 | - ls /dev/mapper/ceph--* | xargs -I% -- fuser --kill % 44 | - ls /dev/mapper/ceph--* | xargs -I% -- dmsetup clear % 45 | - ls /dev/mapper/ceph--* | xargs -I% -- dmsetup remove -f % 46 | - ls /dev/mapper/ceph--* | xargs -I% -- rm -rf % 47 | when: 48 | - rook_ceph.enabled is defined 49 | - rook_ceph.enabled 50 | 51 | - name: rook-ceph | wipe the block device 52 | become: true 53 | command: "wipefs -af {{ item }}" 54 | loop: 55 | "{{ rook_ceph.devices }}" 56 | when: 57 | - rook_ceph.enabled is defined 58 | - rook_ceph.enabled 59 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | roles: 3 | - src: xanmanning.k3s 4 | version: v1.14.1 5 | -------------------------------------------------------------------------------- /roles/k3s/tasks/cni-calico.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: cluster | calico | get all calicoctl releases 4 | get_url: 5 | url: https://api.github.com/repos/projectcalico/calicoctl/releases 6 | dest: /tmp/calicoctl.releases.json 7 | 8 | - name: cluster | calico | get latest calicoctl release version 9 | shell: > 10 | cat /tmp/calicoctl.releases.json | jq --raw-output '.[0].tag_name' 11 | register: calicoctl_latest_release 12 | 13 | - name: cluster | calico | download latest calicoctl 14 | become: yes 15 | get_url: 16 | url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calicoctl_latest_release.stdout_lines[0] }}/calicoctl-linux-amd64" 17 | dest: "/usr/local/bin/calicoctl" 18 | mode: 0755 19 | 20 | - name: cluster | calico | deploy tigera operator to k3s manifest directory 21 | become: true 22 | get_url: 23 | url: "{{ calico.operator_manifest }}" 24 | dest: "{{ k3s_server_manifests_dir }}/tigera-operator.yaml" 25 | mode: 0644 26 | 27 | - name: cluster | calico | deploy configuration to k3s manifest directory 28 | become: true 29 | template: 30 | src: "calico-installation.yaml.j2" 31 | dest: "{{ k3s_server_manifests_dir }}/calico-installation.yaml" 32 | mode: 0644 33 | 34 | - name: cluster | calico | deploy BGP peer configuration to k3s manifest directory 35 | become: true 36 | template: 37 | src: "calico-bgppeer.yaml.j2" 38 | dest: "{{ k3s_server_manifests_dir }}/calico-bgppeer.yaml" 39 | mode: 0644 40 | when: 41 | - calico.bgp.enabled is defined 42 | - calico.bgp.enabled 43 | 44 | - name: cluster | calico | deploy BGP configuration to k3s manifest directory 45 | become: true 46 | template: 47 | src: "calico-bgpconfiguration.yaml.j2" 48 | dest: "{{ k3s_server_manifests_dir }}/calico-bgpconfiguration.yaml" 49 | mode: 0644 50 | when: 51 | - calico.bgp.enabled is defined 52 | - calico.bgp.enabled 53 | 54 | # 55 | # Enable eBPF and DSR 56 | # https://docs.projectcalico.org/maintenance/enabling-bpf 57 | # 58 | 59 | - name: cluster | calico | deploy eBPF
configuration to k3s manifest directory 60 | become: true 61 | template: 62 | src: "calico-ebpf-configmap.yaml.j2" 63 | dest: "{{ k3s_server_manifests_dir }}/calico-ebpf-configmap.yaml" 64 | mode: 0644 65 | when: 66 | - calico.ebpf.enabled is defined 67 | - calico.ebpf.enabled 68 | 69 | - name: cluster | calico | wait for resources to be deployed 70 | pause: 71 | seconds: "30" 72 | when: 73 | - calico.ebpf.enabled is defined 74 | - calico.ebpf.enabled 75 | 76 | - name: cluster | calico | enable eBPF 77 | become: true 78 | run_once: true 79 | environment: 80 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 81 | DATASTORE_TYPE: "kubernetes" 82 | shell: > 83 | calicoctl patch felixconfiguration default --patch='{"spec": {"bpfEnabled": true}}' 84 | when: 85 | - k3s_control_node is defined 86 | - k3s_control_node 87 | - calico.ebpf.enabled is defined 88 | - calico.ebpf.enabled 89 | 90 | - name: cluster | calico | enable DSR 91 | become: true 92 | run_once: true 93 | environment: 94 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 95 | DATASTORE_TYPE: "kubernetes" 96 | shell: > 97 | calicoctl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}' 98 | when: 99 | - k3s_control_node is defined 100 | - k3s_control_node 101 | - calico.ebpf.enabled is defined 102 | - calico.ebpf.enabled 103 | -------------------------------------------------------------------------------- /roles/k3s/tasks/cni-cilium-kube-router.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: cluster | cilium | load etcd ca file 4 | become: true 5 | slurp: 6 | src: "/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt" 7 | register: slurped_etcd_ca_file 8 | when: 9 | - cilium_kube_router.cilium.etcd is defined 10 | - cilium_kube_router.cilium.etcd 11 | 12 | - name: cluster | cilium | load etcd cert file 13 | become: true 14 | slurp: 15 | src: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" 16 | register: slurped_etcd_cert_file 17 | when: 18 | - cilium_kube_router.cilium.etcd is defined 19 | - cilium_kube_router.cilium.etcd 20 | 21 | - name: cluster | cilium | load etcd cert key file 22 | become: true 23 | slurp: 24 | src: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" 25 | register: slurped_etcd_cert_key_file 26 | when: 27 | - cilium_kube_router.cilium.etcd is defined 28 | - cilium_kube_router.cilium.etcd 29 | 30 | - name: cluster | cilium | deploy etcd secret to k3s manifest directory 31 | become: true 32 | template: 33 | src: "cilium-etcd-secrets.yaml.j2" 34 | dest: "{{ k3s_server_manifests_dir }}/cilium-etcd-secrets.yaml" 35 | mode: 0644 36 | when: 37 | - cilium_kube_router.cilium.etcd is defined 38 | - cilium_kube_router.cilium.etcd 39 | 40 | # - name: cluster | cilium | deploy secret for etcd to k3s manifest directory 41 | # become: true 42 | # environment: 43 | # KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 44 | # shell: > 45 | # kubectl create secret generic -n kube-system cilium-etcd-secrets 46 | # --from-file=etcd-client-ca.crt=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt 47 | # --from-file=etcd-client.key=/var/lib/rancher/k3s/server/tls/etcd/server-client.key 48 | # --from-file=etcd-client.crt=/var/lib/rancher/k3s/server/tls/etcd/server-client.crt 49 | # --dry-run=client 50 | # --output=yaml 51 | # > "{{ k3s_server_manifests_dir }}/cilium-etcd-secrets.yaml" 52 | # when: 53 | # - cilium_kube_router.cilium.etcd is defined 54 | # - cilium_kube_router.cilium.etcd 55 | 56 | - name: cluster | cilium | deploy configuration to k3s 
manifest directory 57 | become: true 58 | template: 59 | src: "cilium-helmchart.yaml.j2" 60 | dest: "{{ k3s_server_manifests_dir }}/cilium-helmchart.yaml" 61 | mode: 0644 62 | 63 | - name: cluster | cilium | deploy kube-router to k3s manifest directory 64 | become: true 65 | template: 66 | src: "kube-router.yaml.j2" 67 | dest: "{{ k3s_server_manifests_dir }}/kube-router.yaml" 68 | mode: 0644 69 | -------------------------------------------------------------------------------- /roles/k3s/tasks/kubeconfig.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: cluster | kubeconfig | copy config file to local directory 4 | become: true 5 | run_once: true 6 | fetch: 7 | src: "/etc/rancher/k3s/k3s.yaml" 8 | dest: "{{ inventory_dir | dirname | dirname }}/kubeconfig" 9 | owner: "{{ lookup('env', 'USER') }}" 10 | flat: true 11 | when: 12 | - k3s_control_node is defined 13 | - k3s_control_node 14 | 15 | - name: cluster | kubeconfig | update kubeconfig with the right IPv4 address 16 | delegate_to: localhost 17 | become: false 18 | run_once: true 19 | replace: 20 | path: "{{ inventory_dir | dirname | dirname }}/kubeconfig" 21 | regexp: 'https://127.0.0.1:6443' 22 | replace: 'https://{{ keepalived.vip if keepalived.enabled is defined and keepalived.enabled else k3s_control_node_address }}:6443' 23 | 24 | - name: cluster | kubeconfig | create directory .kube 25 | delegate_to: localhost 26 | become: false 27 | run_once: true 28 | file: 29 | path: ~/.kube 30 | state: directory 31 | owner: "{{ lookup('env', 'USER') }}" 32 | when: 33 | - kubeconfig is defined 34 | - kubeconfig 35 | 36 | - name: cluster | kubeconfig | copy config file to user home directory 37 | delegate_to: localhost 38 | become: false 39 | run_once: true 40 | copy: 41 | src: "{{ inventory_dir | dirname | dirname }}/kubeconfig" 42 | dest: "~/.kube/config" 43 | owner: "{{ lookup('env', 'USER') }}" 44 | when: 45 | - kubeconfig is defined 46 | - kubeconfig 47 | -------------------------------------------------------------------------------- /roles/k3s/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: kubeconfig.yml 4 | tags: 5 | - kubeconfig 6 | 7 | - include: meta.yml 8 | tags: 9 | - meta 10 | 11 | - include: prometheus-operator-crds.yml 12 | tags: 13 | - prometheus-operator-crds 14 | 15 | - include: registry.yml 16 | when: (registry.cache.enabled is defined 17 | and registry.cache.enabled) 18 | or (registry.custom.enabled is defined 19 | and registry.custom.enabled) 20 | tags: 21 | - registry 22 | 23 | - include: cni-calico.yml 24 | when: 25 | - k3s_control_node is defined 26 | - k3s_control_node 27 | - calico.enabled is defined 28 | - calico.enabled 29 | tags: 30 | - cni-calico 31 | 32 | - include: cni-cilium-kube-router.yml 33 | when: 34 | - k3s_control_node is defined 35 | - k3s_control_node 36 | - cilium_kube_router.enabled is defined 37 | - cilium_kube_router.enabled 38 | tags: 39 | - cni-cilium-kube-router 40 | -------------------------------------------------------------------------------- /roles/k3s/tasks/meta.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: cluster | meta | apply master taints 4 | become: true 5 | run_once: true 6 | environment: 7 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 8 | command: kubectl taint --overwrite node {{ hostvars[item]['ansible_hostname'] }} node-role.kubernetes.io/master=true:NoSchedule 9 | loop:
"{{ groups['master'] }}" 10 | register: apply_master_taints 11 | retries: 3 12 | until: apply_master_taints is success 13 | when: 14 | - k3s_control_node is defined 15 | - k3s_control_node 16 | - k3s_taint_master is defined 17 | - k3s_taint_master 18 | - groups['worker'] | length > 0 19 | 20 | - name: cluster | meta | apply worker annotations 21 | become: true 22 | run_once: true 23 | environment: 24 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 25 | command: kubectl label --overwrite node {{ hostvars[item]['ansible_hostname'] }} node-role.kubernetes.io/worker=true 26 | loop: "{{ groups['worker'] }}" 27 | register: apply_worker_annotations 28 | retries: 3 29 | until: apply_worker_annotations is success 30 | when: 31 | - k3s_control_node is defined 32 | - k3s_control_node 33 | 34 | - name: cluster | meta | apply system-upgrade annotations 35 | become: true 36 | run_once: true 37 | environment: 38 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 39 | command: kubectl label --overwrite node {{ hostvars[item]['ansible_hostname'] }} k3s-upgrade=true 40 | # Cannot use loop w/ hostvars in when... https://github.com/ansible/ansible/issues/68364 41 | with_items: 42 | - "{{ groups['master'] }}" 43 | - "{{ groups['worker'] }}" 44 | register: apply_system_upgrade_annotations 45 | retries: 3 46 | until: apply_system_upgrade_annotations is success 47 | when: 48 | - k3s_control_node is defined 49 | - k3s_control_node 50 | - hostvars[item].k3s_system_upgrade is defined 51 | - hostvars[item].k3s_system_upgrade 52 | 53 | - name: cluster | meta | remove system-upgrade annotations 54 | become: true 55 | run_once: true 56 | environment: 57 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 58 | command: kubectl label --overwrite node {{ hostvars[item]['ansible_hostname'] }} k3s-upgrade- 59 | # Cannot use loop w/ hostvars in when... 
https://github.com/ansible/ansible/issues/68364 60 | with_items: 61 | - "{{ groups['master'] }}" 62 | - "{{ groups['worker'] }}" 63 | register: remove_system_upgrade_annotations 64 | retries: 3 65 | until: remove_system_upgrade_annotations is success 66 | when: 67 | - k3s_control_node is defined 68 | - k3s_control_node 69 | - (hostvars[item].k3s_system_upgrade is not defined or not hostvars[item].k3s_system_upgrade) 70 | -------------------------------------------------------------------------------- /roles/k3s/tasks/prometheus-operator-crds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: cluster | apply prometheus-operator CRDs 4 | become: true 5 | run_once: true 6 | environment: 7 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 8 | command: "kubectl apply -f {{ item }}" 9 | loop: "{{ prometheus_operator_crds }}" 10 | register: apply_prometheus_operator_crds 11 | retries: 3 12 | until: apply_prometheus_operator_crds is success 13 | when: 14 | - k3s_control_node is defined 15 | - k3s_control_node 16 | - prometheus_operator_crds is defined 17 | - prometheus_operator_crds is iterable 18 | - prometheus_operator_crds | length > 0 -------------------------------------------------------------------------------- /roles/k3s/tasks/registry.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: cluster-registry | create /etc/rancher/k3s 4 | become: true 5 | file: 6 | path: "/etc/rancher/k3s" 7 | state: directory 8 | 9 | - name: cluster-registry | configure mirrors and custom registries 10 | become: true 11 | template: 12 | src: "registries.yaml.j2" 13 | dest: "/etc/rancher/k3s/registries.yaml" 14 | mode: 0644 15 | 16 | - name: cluster-registry | restart k3s systemd service 17 | systemd: 18 | name: k3s.service 19 | daemon_reload: yes 20 | enabled: yes 21 | state: restarted 22 | -------------------------------------------------------------------------------- /roles/k3s/templates/calico-bgpconfiguration.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: crd.projectcalico.org/v1 3 | kind: BGPConfiguration 4 | metadata: 5 | name: default 6 | spec: 7 | serviceExternalIPs: 8 | - cidr: {{ calico.bgp.externalIPs }} 9 | -------------------------------------------------------------------------------- /roles/k3s/templates/calico-bgppeer.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: crd.projectcalico.org/v1 3 | kind: BGPPeer 4 | metadata: 5 | name: global 6 | spec: 7 | peerIP: {{ calico.bgp.peer }} 8 | asNumber: {{ calico.bgp.as }} 9 | -------------------------------------------------------------------------------- /roles/k3s/templates/calico-ebpf-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: kubernetes-services-endpoint 6 | namespace: tigera-operator 7 | data: 8 | KUBERNETES_SERVICE_HOST: "{{ k3s_control_node_address }}" 9 | KUBERNETES_SERVICE_PORT: "6443" 10 | -------------------------------------------------------------------------------- /roles/k3s/templates/calico-installation.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operator.tigera.io/v1 3 | kind: Installation 4 | metadata: 5 | name: default 6 | spec: 7 | calicoNetwork: 8 | ipPools: 9 | - blockSize: 26 10 | cidr: "{{ 
k3s_service_cidr }}" 11 | encapsulation: IPIPCrossSubnet 12 | natOutgoing: Enabled 13 | nodeSelector: all() 14 | -------------------------------------------------------------------------------- /roles/k3s/templates/cilium-etcd-secrets.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | type: Opaque 5 | metadata: 6 | name: cilium-etcd-secrets 7 | namespace: kube-system 8 | data: 9 | etcd-client-ca.crt: "{{ slurped_etcd_ca_file.content }}" 10 | etcd-client.crt: "{{ slurped_etcd_cert_file.content }}" 11 | etcd-client.key: "{{ slurped_etcd_cert_key_file.content }}" 12 | -------------------------------------------------------------------------------- /roles/k3s/templates/cilium-helmchart.yaml.j2: -------------------------------------------------------------------------------- 1 | #jinja2:lstrip_blocks: True 2 | --- 3 | apiVersion: helm.cattle.io/v1 4 | kind: HelmChart 5 | metadata: 6 | name: cilium 7 | namespace: kube-system 8 | spec: 9 | chart: cilium 10 | repo: https://helm.cilium.io/ 11 | version: {{ cilium_kube_router.cilium.version }} 12 | targetNamespace: kube-system 13 | bootstrap: true 14 | valuesContent: |- 15 | global: 16 | registry: {{ cilium_kube_router.cilium.registry }} 17 | tag: v{{ cilium_kube_router.cilium.version }} 18 | pullPolicy: IfNotPresent 19 | k8sServiceHost: {{ k3s_control_node_address }} 20 | k8sServicePort: 6443 21 | {% if cilium_kube_router.cilium.etcd is defined and cilium_kube_router.cilium.etcd %} 22 | etcd: 23 | enabled: true 24 | ssl: true 25 | endpoints: 26 | {% for host in groups['master'] %} 27 | - "https://{{ hostvars[host].ansible_default_ipv4.address }}:2379" 28 | {% endfor %} 29 | {% endif %} 30 | tunnel: disabled 31 | autoDirectNodeRoutes: true 32 | kubeProxyReplacement: strict 33 | nativeRoutingCIDR: "{{ cilium_kube_router.cilium.nativeRoutingCIDR }}" 34 | endpointRoutes: 35 | enabled: true 36 | hostServices: 37 | enabled: true 38 | nodePort: 39 | enabled: true 40 | mode: dsr 41 | externalIPs: 42 | enabled: true 43 | ipam: 44 | operator: 45 | clusterPoolIPv4PodCIDR: "{{ k3s_cluster_cidr }}" 46 | clusterPoolIPv4MaskSize: 24 47 | containerRuntime: 48 | integration: containerd 49 | socketPath: /var/run/k3s/containerd/containerd.sock 50 | prometheus: 51 | enabled: true 52 | serviceMonitor: 53 | enabled: true 54 | operatorPrometheus: 55 | enabled: true 56 | hubble: 57 | enabled: true 58 | ui: 59 | enabled: true 60 | relay: 61 | enabled: true 62 | metrics: 63 | enabled: 64 | - dns 65 | - drop 66 | - tcp 67 | - flow 68 | - port-distribution 69 | - icmp 70 | - http 71 | serviceMonitor: 72 | enabled: true -------------------------------------------------------------------------------- /roles/k3s/templates/kube-router.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | labels: 6 | k8s-app: kube-router 7 | tier: node 8 | name: kube-router 9 | namespace: kube-system 10 | spec: 11 | selector: 12 | matchLabels: 13 | k8s-app: kube-router 14 | tier: node 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kube-router 19 | tier: node 20 | spec: 21 | priorityClassName: system-node-critical 22 | serviceAccountName: kube-router 23 | containers: 24 | - name: kube-router 25 | image: "{{ cilium_kube_router.kube_router.registry }}:v{{ cilium_kube_router.kube_router.version }}" 26 | imagePullPolicy: IfNotPresent 27 | args: 28 | - "--run-router=true" 29 | - "--run-firewall=false" 30 
| - "--run-service-proxy=false" 31 | - "--enable-cni=false" 32 | - "--enable-pod-egress=false" 33 | - "--enable-ibgp=true" 34 | - "--enable-overlay=true" 35 | - "--advertise-cluster-ip=true" 36 | - "--advertise-external-ip=true" 37 | - "--advertise-loadbalancer-ip=true" 38 | - "--peer-router-ips={{ cilium_kube_router.kube_router.peerRouterIP }}" 39 | - "--peer-router-asns={{ cilium_kube_router.kube_router.peerRouterASNS }}" 40 | - "--cluster-asn={{ cilium_kube_router.kube_router.clusterASN }}" 41 | - "--metrics-path=/metrics" 42 | - "--metrics-port=8080" 43 | env: 44 | - name: NODE_NAME 45 | valueFrom: 46 | fieldRef: 47 | fieldPath: spec.nodeName 48 | livenessProbe: 49 | httpGet: 50 | path: /healthz 51 | port: 20244 52 | initialDelaySeconds: 10 53 | periodSeconds: 3 54 | resources: 55 | requests: 56 | cpu: 250m 57 | memory: 250Mi 58 | securityContext: 59 | privileged: true 60 | volumeMounts: 61 | - name: xtables-lock 62 | mountPath: /run/xtables.lock 63 | readOnly: false 64 | hostNetwork: true 65 | tolerations: 66 | - effect: NoSchedule 67 | operator: Exists 68 | - key: CriticalAddonsOnly 69 | operator: Exists 70 | - effect: NoExecute 71 | operator: Exists 72 | volumes: 73 | - name: xtables-lock 74 | hostPath: 75 | path: /run/xtables.lock 76 | type: FileOrCreate 77 | --- 78 | apiVersion: v1 79 | kind: ServiceAccount 80 | metadata: 81 | name: kube-router 82 | namespace: kube-system 83 | --- 84 | kind: ClusterRole 85 | apiVersion: rbac.authorization.k8s.io/v1beta1 86 | metadata: 87 | name: kube-router 88 | namespace: kube-system 89 | rules: 90 | - apiGroups: 91 | - "" 92 | resources: 93 | - namespaces 94 | - pods 95 | - services 96 | - nodes 97 | - endpoints 98 | verbs: 99 | - list 100 | - get 101 | - watch 102 | - apiGroups: 103 | - "networking.k8s.io" 104 | resources: 105 | - networkpolicies 106 | verbs: 107 | - list 108 | - get 109 | - watch 110 | - apiGroups: 111 | - extensions 112 | resources: 113 | - networkpolicies 114 | verbs: 115 | - get 116 | - list 117 | - watch 118 | --- 119 | kind: ClusterRoleBinding 120 | apiVersion: rbac.authorization.k8s.io/v1beta1 121 | metadata: 122 | name: kube-router 123 | roleRef: 124 | apiGroup: rbac.authorization.k8s.io 125 | kind: ClusterRole 126 | name: kube-router 127 | subjects: 128 | - kind: ServiceAccount 129 | name: kube-router 130 | namespace: kube-system 131 | -------------------------------------------------------------------------------- /roles/k3s/templates/registries.yaml.j2: -------------------------------------------------------------------------------- 1 | #jinja2:lstrip_blocks: True 2 | --- 3 | {% if registry.cache.enabled is defined and registry.cache.enabled %} 4 | mirrors: 5 | "docker.io": 6 | endpoint: 7 | - "{{ registry.cache.address }}" 8 | "*": 9 | endpoint: 10 | - "{{ registry.cache.address }}" 11 | {% endif %} 12 | {% if registry.custom.enabled is defined and registry.custom.enabled %} 13 | configs: 14 | "{{ registry.custom.address }}": 15 | auth: 16 | username: "{{ registry.custom.username }}" 17 | password: "{{ registry.custom.password }}" 18 | {% endif %} 19 | -------------------------------------------------------------------------------- /roles/k3s/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Prometheus Operator version from: 4 | # https://github.com/prometheus-operator/prometheus-operator/releases 5 | prometheus_operator_version: "0.42" 6 | 7 | # URLs to the Prometheus Operator CRDs 8 | prometheus_operator_crds: 9 | - 
"https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml" 10 | - "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml" 11 | - "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml" 12 | - "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml" 13 | - "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml" 14 | - "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml" 15 | - "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-{{ prometheus_operator_version }}/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml" 16 | -------------------------------------------------------------------------------- /roles/longhorn/tasks/disks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: longhorn | disks | create mount directory 4 | file: 5 | path: /var/lib/longhorn 6 | state: directory 7 | 8 | - name: longhorn | disks | create new LVM volume group 9 | become: true 10 | lvg: 11 | vg: "longhorn-vg" 12 | pvs: "{{ longhorn.disks | join(',') }}" 13 | state: present 14 | 15 | - name: longhorn | disks | create new LVM logical volume 16 | become: true 17 | lvol: 18 | vg: "longhorn-vg" 19 | lv: "lv0" 20 | size: "100%FREE" 21 | shrink: false 22 | opts: "{{ longhorn.opts | default('') }}" 23 | state: present 24 | 25 | - name: longhorn | disks | create new filesystem on new LVM logical volume 26 | become: true 27 | filesystem: 28 | fstype: "{{ longhorn.filesystem | default('ext4') }}" 29 | dev: "/dev/longhorn-vg/lv0" 30 | resizefs: yes 31 | 32 | - name: longhorn | disks | mounting new filesystem 33 | become: true 34 | ansible.posix.mount: 35 | path: "{{ longhorn.mountpoint }}" 36 | src: "/dev/longhorn-vg/lv0" 37 | fstype: "{{ longhorn.filesystem }}" 38 | state: mounted 39 | opts: "{{ longhorn.mount_opts | default('defaults') }}" 40 | -------------------------------------------------------------------------------- /roles/longhorn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: cluster-meta.yml 4 | - include: multipath.yml 5 | - include: disks.yml 6 | -------------------------------------------------------------------------------- /roles/longhorn/tasks/meta.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: longhorn | meta | apply labels 3 | become: true 4 | run_once: true 5 | environment: 6 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 7 | command: kubectl label --overwrite node {{ hostvars[item]['ansible_hostname'] }} node.longhorn.io/create-default-disk=config 8 | # Cannot use loop w/ hostvars in when... 
https://github.com/ansible/ansible/issues/68364 9 | with_items: 10 | - "{{ groups['worker'] }}" 11 | register: apply_longhorn_labels 12 | retries: 5 13 | until: apply_longhorn_labels is success 14 | when: 15 | - k3s_control_node is defined 16 | - k3s_control_node 17 | - hostvars[item].longhorn.enabled is defined 18 | - hostvars[item].longhorn.enabled == true 19 | 20 | - name: longhorn | meta | apply annotations 21 | become: true 22 | run_once: true 23 | environment: 24 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 25 | command: kubectl annotate --overwrite node {{ hostvars[item]['ansible_hostname'] }} node.longhorn.io/default-disks-config='[{"path":"{{ hostvars[item]['longhorn']['mountpoint'] }}", "name":"longhorn", "allowScheduling":true}]' 26 | # Cannot use loop w/ hostvars in when... https://github.com/ansible/ansible/issues/68364 27 | with_items: 28 | - "{{ groups['worker'] }}" 29 | register: apply_longhorn_annotations 30 | retries: 5 31 | until: apply_longhorn_annotations is success 32 | when: 33 | - k3s_control_node is defined 34 | - k3s_control_node 35 | - hostvars[item].longhorn.enabled is defined 36 | - hostvars[item].longhorn.enabled == true 37 | 38 | - name: longhorn | meta | remove labels 39 | become: true 40 | run_once: true 41 | environment: 42 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 43 | command: kubectl label --overwrite node {{ hostvars[item]['ansible_hostname'] }} node.longhorn.io/create-default-disk- 44 | # Cannot use loop w/ hostvars in when... https://github.com/ansible/ansible/issues/68364 45 | with_items: 46 | - "{{ groups['worker'] }}" 47 | register: remove_longhorn_labels 48 | retries: 5 49 | until: remove_longhorn_labels is success 50 | when: 51 | - k3s_control_node is defined 52 | - k3s_control_node 53 | - (hostvars[item].longhorn.enabled is not defined or not hostvars[item].longhorn.enabled) 54 | 55 | - name: longhorn | meta | remove annotations 56 | become: true 57 | run_once: true 58 | environment: 59 | KUBECONFIG: "/etc/rancher/k3s/k3s.yaml" 60 | command: kubectl annotate --overwrite node {{ hostvars[item]['ansible_hostname'] }} node.longhorn.io/default-disks-config- 61 | # Cannot use loop w/ hostvars in when...
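# Net effect of the label and annotate commands above on an opted-in worker is shown
# below as the node metadata they leave behind (illustrative only; the node name and
# path are placeholders matching the host_vars sketch earlier):
# apiVersion: v1
# kind: Node
# metadata:
#   name: k8s-worker-a
#   labels:
#     node.longhorn.io/create-default-disk: "config"
#   annotations:
#     node.longhorn.io/default-disks-config: '[{"path":"/var/lib/longhorn", "name":"longhorn", "allowScheduling":true}]'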
https://github.com/ansible/ansible/issues/68364 62 | with_items: 63 | - "{{ groups['worker'] }}" 64 | register: remove_longhorn_annotations 65 | retries: 5 66 | until: remove_longhorn_annotations is success 67 | when: 68 | - k3s_control_node is defined 69 | - k3s_control_node 70 | - (hostvars[item].longhorn.enabled is not defined or not hostvars[item].longhorn.enabled) 71 | -------------------------------------------------------------------------------- /roles/longhorn/tasks/multipath.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # 4 | # https://github.com/longhorn/longhorn/issues/1210#issuecomment-671689746 5 | # 6 | 7 | - name: longhorn | multipath | create multipath config directory 8 | become: true 9 | file: 10 | path: "/etc/multipath/conf.d" 11 | state: directory 12 | 13 | - name: longhorn | multipath | blacklist multipath for sd* 14 | become: true 15 | copy: 16 | content: | 17 | blacklist { 18 | devnode "^sd[a-z0-9]+" 19 | } 20 | dest: /etc/multipath/conf.d/longhorn.conf 21 | 22 | - name: longhorn | multipathd | restart systemd service 23 | systemd: 24 | name: multipathd.service 25 | daemon_reload: no 26 | enabled: yes 27 | state: restarted -------------------------------------------------------------------------------- /roles/os/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: logind | restart systemd service 4 | systemd: 5 | name: systemd-logind.service 6 | daemon_reload: yes 7 | enabled: yes 8 | state: restarted 9 | listen: restart logind 10 | 11 | - name: keepalived | restart systemd service 12 | systemd: 13 | name: keepalived.service 14 | daemon_reload: yes 15 | enabled: yes 16 | state: restarted 17 | listen: restart keepalived 18 | 19 | - name: unattended-upgrades | restart systemd service 20 | service: 21 | name: unattended-upgrades.service 22 | daemon_reload: yes 23 | enabled: yes 24 | state: restarted 25 | listen: unattended-upgrades configuration changed 26 | 27 | - name: time | restart systemd service 28 | service: 29 | name: systemd-timesyncd.service 30 | daemon_reload: yes 31 | enabled: yes 32 | state: restarted 33 | listen: systemd-timesyncd configuration changed 34 | 35 | - name: time | write to system clock 36 | command: hwclock --systohc 37 | listen: systemd-timesyncd configuration changed 38 | when: ansible_virtualization_role == "host" 39 | or ansible_virtualization_role == "NA" 40 | 41 | - name: grub | mkconfig 42 | command: grub-mkconfig -o /boot/grub/grub.cfg 43 | listen: grub configuration changed 44 | -------------------------------------------------------------------------------- /roles/os/tasks/filesystem.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: filesystem | mount bpffs 4 | become: true 5 | ansible.posix.mount: 6 | src: bpffs 7 | path: /sys/fs/bpf 8 | fstype: bpf 9 | state: mounted 10 | opts: rw,nosuid,nodev,noexec,relatime,mode=700 11 | 12 | - name: filesystem | swap 13 | become: true 14 | block: 15 | - name: disable swap 16 | command: swapoff -a 17 | when: ansible_swaptotal_mb > 0 18 | - name: disable swap system start 19 | ansible.posix.mount: 20 | name: "{{ item }}" 21 | fstype: swap 22 | state: absent 23 | loop: 24 | - swap 25 | - none 26 | -------------------------------------------------------------------------------- /roles/os/tasks/host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: host | 
update hostname to inventory hostname 4 | become: true 5 | hostname: 6 | name: "{{ inventory_hostname }}" 7 | when: 8 | - ansible_hostname != inventory_hostname 9 | 10 | - name: host | update etc hosts 11 | become: true 12 | blockinfile: 13 | path: /etc/hosts 14 | create: yes 15 | block: | 16 | 127.0.0.1 localhost 17 | 127.0.1.1 {{ inventory_hostname }} 18 | 19 | # The following lines are desirable for IPv6 capable hosts 20 | ::1 ip6-localhost ip6-loopback 21 | fe00::0 ip6-localnet 22 | ff00::0 ip6-mcastprefix 23 | ff02::1 ip6-allnodes 24 | ff02::2 ip6-allrouters 25 | 26 | {% if keepalived.enabled is defined and keepalived.enabled %} 27 | # keepalived vip 28 | {{ keepalived.vip }} k8s-master-vip 29 | {% endif %} 30 | 31 | # k8s hosts 32 | {% for host in groups['all'] %} 33 | {{ hostvars[host].ansible_default_ipv4.address }} {{ host }} 34 | {% endfor %} 35 | -------------------------------------------------------------------------------- /roles/os/tasks/keepalived.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: keepalived | install package 4 | become: true 5 | apt: 6 | name: keepalived 7 | install_recommends: false 8 | update_cache: true 9 | autoclean: true 10 | autoremove: true 11 | register: apt_install_keepalived 12 | retries: 5 13 | until: apt_install_keepalived is success 14 | when: 15 | - k3s_control_node is defined 16 | - k3s_control_node 17 | - keepalived.enabled is defined 18 | - keepalived.enabled 19 | 20 | - name: keepalived | copy configuration 21 | become: true 22 | template: 23 | src: keepalived.conf.j2 24 | dest: /etc/keepalived/keepalived.conf 25 | notify: restart keepalived 26 | when: 27 | - k3s_control_node is defined 28 | - k3s_control_node 29 | - keepalived.enabled is defined 30 | - keepalived.enabled 31 | -------------------------------------------------------------------------------- /roles/os/tasks/kernel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: kernel | disable apparmor 4 | become: true 5 | replace: 6 | path: /etc/default/grub 7 | regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 8 | replace: '\1 {{ option }}={{ value }}\2' 9 | vars: 10 | option: apparmor 11 | value: 0 12 | 13 | - name: kernel | disable mitigations 14 | become: true 15 | replace: 16 | path: /etc/default/grub 17 | regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 18 | replace: '\1 {{ option }}={{ value }}\2' 19 | vars: 20 | option: mitigations 21 | value: "off" 22 | 23 | - name: kernel | increase loop device count 24 | become: true 25 | replace: 26 | path: /etc/default/grub 27 | regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 28 | replace: '\1 {{ option }}={{ value }}\2' 29 | vars: 30 | option: max_loop 31 | value: 32 32 | 33 | - name: kernel | enable modules 34 | become: true 35 | community.general.modprobe: 36 | name: "{{ item }}" 37 | state: present 38 | loop: 39 | - br_netfilter 40 | - overlay 41 | - rbd 42 | - ip_vs 43 | 44 | - name: kernel | enable modules on boot 45 | become: true 46 | copy: 47 | content: "{{ item }}" 48 | dest: "/etc/modules-load.d/{{ item }}.conf" 49 | loop: 50 | - br_netfilter 51 | - overlay 52 | - rbd 53 | - ip_vs 54 | 55 | # Blacklist for PiAware: https://hub.docker.com/r/mikenye/piaware 56 | - name: kernel | 
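# keepalived.yml above, the /etc/hosts block in host.yml, and keepalived.conf.j2 all
# read the same `keepalived` dict. Illustrative group_vars sketch (interface and VIP
# are placeholders; the key names mirror those tasks and templates):
keepalived:
  enabled: true
  interface: eth0
  vip: 192.168.1.100   # floating address the kubeconfig and /etc/hosts entries point at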
blacklist modules 57 | become: true 58 | community.general.kernel_blacklist: 59 | name: "{{ item }}" 60 | state: present 61 | loop: 62 | - dvb_usb_rtl28xxu 63 | - rtl2832 64 | - rtl2832_sdr 65 | 66 | # Blacklist for PiAware: https://hub.docker.com/r/mikenye/piaware 67 | - name: kernel | blacklist modules on boot 68 | become: true 69 | copy: 70 | content: "blacklist {{ item }}" 71 | dest: "/etc/modprobe.d/{{ item }}.conf" 72 | loop: 73 | - dvb_usb_rtl28xxu 74 | - rtl2832 75 | - rtl2832_sdr 76 | -------------------------------------------------------------------------------- /roles/os/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: host.yml 4 | tags: 5 | - host 6 | 7 | - include: time.yml 8 | tags: 9 | - time 10 | 11 | - include: packages.yml 12 | tags: 13 | - packages 14 | 15 | - include: power-button.yml 16 | tags: 17 | - power-button 18 | 19 | - include: motd.yml 20 | tags: 21 | - motd 22 | 23 | - include: kernel.yml 24 | tags: 25 | - kernel 26 | 27 | - include: network.yml 28 | tags: 29 | - network 30 | 31 | - include: filesystem.yml 32 | tags: 33 | - filesystem 34 | 35 | - include: unattended-upgrades.yml 36 | tags: 37 | - unattended-upgrades 38 | 39 | - include: user.yml 40 | tags: 41 | - user 42 | 43 | - include: keepalived.yml 44 | when: 45 | - k3s_control_node is defined 46 | - k3s_control_node 47 | - keepalived.enabled is defined 48 | - keepalived.enabled 49 | tags: 50 | - keepalived 51 | 52 | - include: rsyslog.yml 53 | when: 54 | - rsyslog.enabled is defined 55 | - rsyslog.enabled 56 | tags: 57 | - rsyslog 58 | -------------------------------------------------------------------------------- /roles/os/tasks/motd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: motd | disable 4 | lineinfile: 5 | dest: /etc/default/motd-news 6 | state: present 7 | regexp: '^ENABLED=' 8 | line: 'ENABLED=0' 9 | -------------------------------------------------------------------------------- /roles/os/tasks/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # k3s still recommended use legacy iptables 4 | # https://rancher.com/docs/k3s/latest/en/known-issues/ 5 | - name: network | configure nodes to use legacy iptables 6 | become: true 7 | community.general.alternatives: 8 | name: "{{ item }}" 9 | path: /usr/sbin/{{ item }}-legacy 10 | loop: 11 | - "iptables" 12 | - "ebtables" 13 | 14 | # k3s still recommended use legacy iptables 15 | # https://rancher.com/docs/k3s/latest/en/known-issues/ 16 | - name: network | configure nodes to use legacy ip6tables 17 | become: true 18 | community.general.alternatives: 19 | name: "{{ item }}" 20 | path: /usr/sbin/{{ item }}-legacy 21 | loop: 22 | - "ip6tables" 23 | when: 24 | - disable_ipv6 is not defined or not disable_ipv6 25 | 26 | - name: network | modules | unload ipv6 27 | become: true 28 | community.general.modprobe: 29 | name: "{{ item }}" 30 | state: absent 31 | loop: 32 | - ipv6 33 | when: 34 | - disable_ipv6 is defined 35 | - disable_ipv6 36 | 37 | - name: network | modules | load ipv6 38 | become: true 39 | community.general.modprobe: 40 | name: "{{ item }}" 41 | state: present 42 | loop: 43 | - ipv6 44 | when: 45 | - disable_ipv6 is not defined or not disable_ipv6 46 | 47 | - name: network | modules | blacklist ipv6 48 | become: true 49 | copy: 50 | content: "blacklist {{ item }}" 51 | dest: "/etc/modprobe.d/{{ item }}.conf" 52 | loop: 53 | - ipv6 54 | when: 55 | 
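# The `replace` tasks in kernel.yml above (and the ipv6.disable toggles later in this
# file) all rewrite single options inside GRUB_CMDLINE_LINUX_DEFAULT idempotently. On
# a stock Ubuntu grub file the line ends up roughly as (illustrative, ordering depends
# on the existing value):
#   GRUB_CMDLINE_LINUX_DEFAULT="quiet splash apparmor=0 mitigations=off max_loop=32 ipv6.disable=1"
# A read-back sketch, not part of this role; the `grub configuration changed` handler
# in handlers/main.yml is what regenerates grub.cfg afterwards:
- name: kernel | show resulting GRUB default cmdline (illustrative sketch)
  become: true
  command: grep '^GRUB_CMDLINE_LINUX_DEFAULT=' /etc/default/grub
  register: grub_cmdline
  changed_when: false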
- disable_ipv6 is defined 56 | - disable_ipv6 57 | 58 | - name: network | modules | blacklist ipv6 59 | become: true 60 | file: 61 | path: "/etc/modprobe.d/{{ item }}.conf" 62 | state: absent 63 | loop: 64 | - ipv6 65 | when: 66 | - disable_ipv6 is not defined or not disable_ipv6 67 | 68 | - name: network | grub | disable ipv6 69 | become: true 70 | replace: 71 | path: /etc/default/grub 72 | regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 73 | replace: '\1 {{ option }}={{ value }}\2' 74 | vars: 75 | option: "ipv6.disable" 76 | value: "1" 77 | notify: grub configuration changed 78 | when: 79 | - disable_ipv6 is defined 80 | - disable_ipv6 81 | 82 | - name: network | grub | enable ipv6 83 | become: true 84 | replace: 85 | path: /etc/default/grub 86 | regexp: '^(GRUB_CMDLINE_LINUX_DEFAULT=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 87 | replace: '\1 {{ option }}={{ value }}\2' 88 | vars: 89 | option: "ipv6.disable" 90 | value: "0" 91 | notify: grub configuration changed 92 | when: 93 | - disable_ipv6 is not defined or not disable_ipv6 94 | 95 | - name: network | sysctl 96 | become: true 97 | blockinfile: 98 | path: /etc/sysctl.d/99-kubernetes-cri.conf 99 | create: yes 100 | block: | 101 | # ipv4 102 | net.ipv4.ip_forward = 1 103 | net.bridge.bridge-nf-call-arptables = 1 104 | net.bridge.bridge-nf-call-iptables = 1 105 | {% if disable_ipv6 is defined and disable_ipv6 %} 106 | # disable ipv6 107 | net.ipv6.conf.all.disable_ipv6 = 1 108 | net.ipv6.conf.default.disable_ipv6 = 1 109 | net.ipv6.conf.lo.disable_ipv6 = 1 110 | {% else %} 111 | # ipv6 112 | net.bridge.bridge-nf-call-ip6tables = 1 113 | net.ipv6.ip_forward = 1 114 | {% endif %} 115 | -------------------------------------------------------------------------------- /roles/os/tasks/packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: packages | upgrade all packages 4 | apt: 5 | upgrade: full 6 | update_cache: yes 7 | cache_valid_time: 3600 8 | autoclean: yes 9 | autoremove: yes 10 | register: apt_upgrade 11 | retries: 5 12 | until: apt_upgrade is success 13 | 14 | - name: packages | install common 15 | become: true 16 | apt: 17 | name: "{{ packages.install }}" 18 | install_recommends: false 19 | update_cache: true 20 | cache_valid_time: 3600 21 | autoclean: true 22 | autoremove: true 23 | register: apt_install_common 24 | retries: 5 25 | until: apt_install_common is success 26 | when: 27 | - packages.install is defined 28 | - packages.install is iterable 29 | - packages.install | length > 0 30 | 31 | - name: packages | gather install packages 32 | package_facts: 33 | manager: auto 34 | when: 35 | - packages.remove is defined 36 | - packages.remove is iterable 37 | - packages.remove | length > 0 38 | - "'snapd' in packages.remove" 39 | 40 | # Until focal has https://launchpad.net/ubuntu/+source/htop 41 | - name: packages | install latest htop 42 | become: true 43 | apt: 44 | deb: "https://launchpad.net/ubuntu/+archive/primary/+files/htop_{{ packages.deb.htop_version | default('3.0.2-1') }}_amd64.deb" 45 | register: apt_install_htop 46 | retries: 5 47 | until: apt_install_htop is success 48 | when: 49 | - "'htop' not in ansible_facts.packages" 50 | - "'htop' not in packages.install" 51 | - packages.deb.htop_version is defined 52 | - packages.deb.htop_version 53 | 54 | - name: packages | check if snap is installed 55 | debug: 56 | msg: "snapd is 
installed" 57 | register: snapd_check 58 | when: 59 | - "'snapd' in ansible_facts.packages" 60 | - packages.remove is defined 61 | - packages.remove is iterable 62 | - packages.remove | length > 0 63 | - "'snapd' in packages.remove" 64 | 65 | - name: packages | remove snap packages 66 | become: true 67 | command: snap remove {{ item }} 68 | loop: 69 | - lxd 70 | - core18 71 | - snapd 72 | when: 73 | - ansible_distribution == 'Ubuntu' 74 | - snapd_check.failed is defined 75 | - packages.remove is defined 76 | - packages.remove is iterable 77 | - packages.remove | length > 0 78 | - "'snapd' in packages.remove" 79 | 80 | - name: packages | remove packages 81 | become: true 82 | apt: 83 | name: "{{ packages.remove }}" 84 | state: absent 85 | autoremove: true 86 | when: 87 | - packages.remove is defined 88 | - packages.remove is iterable 89 | - packages.remove | length > 0 90 | 91 | - name: packages | remove file and directory cruft 92 | become: true 93 | file: 94 | state: absent 95 | path: "{{ item }}" 96 | loop: 97 | - "/home/{{ ansible_user }}/.snap" 98 | - "/snap" 99 | - "/var/snap" 100 | - "/var/lib/snapd" 101 | - "/var/cache/snapd" 102 | - "/usr/lib/snapd" 103 | - "/etc/cloud" 104 | - "/var/lib/cloud" 105 | when: 106 | - packages.remove is defined 107 | - packages.remove is iterable 108 | - packages.remove | length > 0 109 | - "'snapd' in packages.remove" 110 | - "'cloud-init' in packages.remove" 111 | -------------------------------------------------------------------------------- /roles/os/tasks/power-button.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: power-button | disable single power button press shutdown 4 | lineinfile: 5 | path: /etc/systemd/logind.conf 6 | regexp: "{{ item.setting }}" 7 | line: "{{ item.setting }}={{ item.value }}" 8 | loop: 9 | - { setting: HandlePowerKey, value: ignore } 10 | notify: restart logind 11 | -------------------------------------------------------------------------------- /roles/os/tasks/rsyslog.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: rsyslog 4 | become: true 5 | block: 6 | - name: rsyslog | copy configuration 7 | copy: 8 | content: | 9 | module(load="omprog") 10 | module(load="mmutf8fix") 11 | action(type="mmutf8fix" replacementChar="?") 12 | action(type="omfwd" protocol="tcp" target="{{ rsyslog.ip }}" port="{{ rsyslog.port }}" Template="RSYSLOG_SyslogProtocol23Format" TCP_Framing="octet-counted" KeepAlive="on") 13 | dest: /etc/rsyslog.d/50-promtail.conf 14 | - name: start systemd service 15 | systemd: 16 | name: rsyslog 17 | enabled: true 18 | state: started 19 | - name: rsyslog | restart systemd service 20 | systemd: 21 | name: rsyslog.service 22 | daemon_reload: true 23 | enabled: true 24 | state: restarted 25 | when: 26 | - rsyslog.enabled is defined 27 | - rsyslog.enabled 28 | -------------------------------------------------------------------------------- /roles/os/tasks/time.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: time | set timezone 4 | become: true 5 | community.general.timezone: 6 | name: "{{ timezone | default('America/New_York') }}" 7 | notify: systemd-timesyncd configuration changed 8 | 9 | - name: time | copy timesyncd config 10 | become: true 11 | copy: 12 | content: | 13 | [Time] 14 | NTP={{ ntp_servers.primary | default("") | join(" ") }} 15 | FallbackNTP={{ ntp_servers.fallback | join(" ") }} 16 | dest: /etc/systemd/timesyncd.conf 
17 | notify: systemd-timesyncd configuration changed 18 | 19 | - name: time | start systemd service 20 | become: true 21 | systemd: 22 | name: systemd-timesyncd 23 | enabled: true 24 | state: started 25 | 26 | - name: time | run timedatectl status 27 | command: /usr/bin/timedatectl show 28 | changed_when: false 29 | check_mode: false 30 | register: timedatectl_result 31 | 32 | - name: time | enable ntp 33 | become: true 34 | command: /usr/bin/timedatectl set-ntp true 35 | when: 36 | - "'NTP=no' in timedatectl_result.stdout" 37 | -------------------------------------------------------------------------------- /roles/os/tasks/unattended-upgrades.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: unattended-upgrades | delete 20auto-upgrades config 4 | become: true 5 | file: 6 | path: /etc/apt/apt.conf.d/20auto-upgrades 7 | state: absent 8 | 9 | - name: unattended-upgrades | copy 10periodic config 10 | become: true 11 | copy: 12 | content: | 13 | APT::Periodic::Update-Package-Lists "1"; 14 | APT::Periodic::Download-Upgradeable-Packages "1"; 15 | APT::Periodic::AutocleanInterval "7"; 16 | APT::Periodic::Unattended-Upgrade "1"; 17 | dest: /etc/apt/apt.conf.d/10periodic 18 | notify: unattended-upgrades configuration changed 19 | 20 | - name: unattended-upgrades | copy 50unattended-upgrades config 21 | become: true 22 | copy: 23 | content: | 24 | Unattended-Upgrade::Automatic-Reboot "false"; 25 | Unattended-Upgrade::Remove-Unused-Dependencies "true"; 26 | Unattended-Upgrade::Allowed-Origins { 27 | "${distro_id}:${distro_codename}"; 28 | "${distro_id} ${distro_codename}-security"; 29 | }; 30 | dest: /etc/apt/apt.conf.d/50unattended-upgrades 31 | notify: unattended-upgrades configuration changed 32 | 33 | - name: unattended-upgrades | start systemd service 34 | become: true 35 | systemd: 36 | name: unattended-upgrades 37 | enabled: true 38 | state: started 39 | -------------------------------------------------------------------------------- /roles/os/tasks/user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: user | add to sudoers 4 | become: true 5 | copy: 6 | content: "{{ ansible_user }} ALL=(ALL:ALL) NOPASSWD:ALL" 7 | dest: "/etc/sudoers.d/{{ ansible_user }}_nopasswd" 8 | mode: "0440" 9 | 10 | - name: user | add additional SSH public keys 11 | ansible.posix.authorized_key: 12 | user: "{{ ansible_user }}" 13 | key: "{{ item }}" 14 | loop: "{{ ssh_authorized_keys }}" 15 | when: 16 | - ssh_authorized_keys is defined 17 | - ssh_authorized_keys is iterable 18 | - ssh_authorized_keys | length > 0 19 | 20 | - name: user | check if hushlogin exists 21 | stat: 22 | path: "/home/{{ ansible_user }}/.hushlogin" 23 | register: hushlogin 24 | 25 | - name: user | silence the login prompt 26 | file: 27 | dest: "/home/{{ ansible_user }}/.hushlogin" 28 | state: touch 29 | owner: "{{ ansible_user }}" 30 | mode: "0775" 31 | when: not hushlogin.stat.exists 32 | 33 | - name: user | create config directory 34 | file: 35 | path: "/home/{{ ansible_user }}/.config" 36 | state: directory 37 | recurse: true 38 | owner: "{{ ansible_user }}" 39 | mode: "0775" 40 | 41 | - name: user | create neofetch directory 42 | file: 43 | path: "/home/{{ ansible_user }}/.config/neofetch" 44 | state: directory 45 | recurse: true 46 | owner: "{{ ansible_user }}" 47 | mode: "0775" 48 | 49 | - name: user | copy neofetch configuration 50 | copy: 51 | owner: "{{ ansible_user }}" 52 | mode: "0775" 53 | content: | 54 | 
print_info() { 55 | prin 56 | info title 57 | info "OS" distro 58 | info "Kernel" kernel 59 | info "CPU Usage" cpu_usage 60 | info "Memory Usage" memory 61 | info "Disk" disk 62 | info "Local IP" local_ip 63 | info "Uptime" uptime 64 | } 65 | dest: "/home/{{ ansible_user }}/.config/neofetch/config.conf" 66 | 67 | - name: user | copy ascii art 68 | copy: 69 | owner: "{{ ansible_user }}" 70 | mode: "0775" 71 | content: | 72 | ${c1} 73 | ::: ::: :::::::: :::::::: 74 | :+: :+: :+: :+: :+: :+: 75 | +:+ +:+ +:+ +:+ +:+ 76 | +#++:++ +#++:++# +#++:++#++ 77 | +#+ +#+ +#+ +#+ +#+ 78 | #+# #+# #+# #+# #+# #+# 79 | ### ### ######## ######## 80 | dest: "/home/{{ ansible_user }}/.config/neofetch/k8s.art" 81 | 82 | - name: user | enable neofetch on login 83 | copy: 84 | content: "neofetch --source /home/{{ ansible_user }}/.config/neofetch/k8s.art --ascii_colors 5 --colors 5 --separator ' :'" 85 | dest: "/home/{{ ansible_user }}/.bash_profile" 86 | owner: "{{ ansible_user }}" 87 | mode: "0775" 88 | -------------------------------------------------------------------------------- /roles/os/templates/keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | router_id LVS_DEVEL 3 | } 4 | 5 | vrrp_script chk_k3s_server { 6 | script "killall -0 k3s-server" 7 | interval 2 8 | weight 2 9 | } 10 | 11 | vrrp_instance VI_1 { 12 | state {{ 'MASTER' if groups['master'][0] == ansible_hostname else 'BACKUP' }} 13 | interface {{ keepalived.interface }} 14 | virtual_router_id 1 15 | priority {{ '150' if groups['master'][0] == ansible_hostname else '149' }} 16 | advert_int 1 17 | authentication { 18 | auth_type PASS 19 | auth_pass kubernetes 20 | } 21 | virtual_ipaddress { 22 | {{ keepalived.vip }}/24 23 | } 24 | track_script { 25 | chk_k3s_server 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /roles/os/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | packages: 4 | deb: 5 | htop_version: 3.0.2-1 6 | install: 7 | - apt-transport-https 8 | - arptables 9 | - ca-certificates 10 | - curl 11 | - dnsutils 12 | - ebtables 13 | - ethtool 14 | - git 15 | - gnupg-agent 16 | - gnupg2 17 | - haveged 18 | - hdparm 19 | - iperf3 20 | - iputils-ping 21 | - ipvsadm 22 | - jq 23 | - lvm2 24 | - neofetch 25 | - net-tools 26 | - netcat 27 | - nfs-common 28 | - nmap 29 | - ntpdate 30 | - open-iscsi 31 | - psmisc 32 | - python3 33 | - python3-openssl 34 | - python3-pip 35 | - rsync 36 | - scsitools 37 | - socat 38 | - software-properties-common 39 | - traceroute 40 | - unattended-upgrades 41 | - unzip 42 | - vim 43 | remove: 44 | - apparmor 45 | - apport 46 | - bcache-tools 47 | - btrfs-progs 48 | - byobu 49 | - cloud-init 50 | - cloud-guest-utils 51 | - cloud-initramfs-copymods 52 | - cloud-initramfs-dyn-netconf 53 | - friendly-recovery 54 | - fwupd 55 | - landscape-common 56 | - lxd-agent-loader 57 | - ntfs-3g 58 | - open-vm-tools 59 | - plymouth 60 | - plymouth-theme-ubuntu-text 61 | - popularity-contest 62 | - snapd 63 | - sosreport 64 | - tmux 65 | - ubuntu-advantage-tools 66 | - ufw 67 | -------------------------------------------------------------------------------- /tests/TEST.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | https://github.com/bitnami/charts/tree/master/bitnami/metallb 4 | 5 | ``` 6 | helm repo add bitnami https://charts.bitnami.com/bitnami 7 | helm repo update 8 | helm install metallb 
--kubeconfig ./kubeconfig --namespace kube-system --version "0.1.23" --values ./tests/metallb-values.yaml bitnami/metallb 9 | helm uninstall metallb --kubeconfig ./kubeconfig --namespace kube-system 10 | ``` 11 | 12 | --- 13 | 14 | https://github.com/kubernetes/ingress-nginx/tree/master/charts/ingress-nginx 15 | 16 | ``` 17 | helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx 18 | helm repo update 19 | helm install nginx-ingress --kubeconfig ./kubeconfig --namespace default --version "2.16.0" --values ./tests/nginx-ingress-values.yaml ingress-nginx/ingress-nginx 20 | helm uninstall nginx-ingress --kubeconfig ./kubeconfig --namespace default 21 | ``` 22 | 23 | --- 24 | 25 | https://github.com/longhorn/longhorn/tree/master/chart 26 | 27 | ``` 28 | helm repo add longhorn https://charts.longhorn.io 29 | helm repo update 30 | kubectl create namespace longhorn-system 31 | helm install longhorn --kubeconfig ./kubeconfig --namespace longhorn-system --version "1.0.2" --values ./tests/longhorn-values.yaml longhorn/longhorn 32 | helm uninstall longhorn --kubeconfig ./kubeconfig --namespace longhorn-system 33 | ``` 34 | 35 | --- 36 | 37 | ``` 38 | helm repo add k8s-at-home https://k8s-at-home.com/charts/ 39 | helm repo update 40 | helm install home-assistant --kubeconfig ./kubeconfig --namespace default --version "2.1.0" --values ./tests/home-assistant-values.yaml k8s-at-home/home-assistant 41 | helm uninstall home-assistant --kubeconfig ./kubeconfig --namespace default 42 | ``` 43 | 44 | --- 45 | 46 | ``` 47 | helm repo add https://helm.cilium.io/ 48 | helm upgrade -i cilium cilium/cilium --version 1.8.3 --namespace kube-system --values ./tests/cilium-values.yaml 49 | ``` -------------------------------------------------------------------------------- /tests/banana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: banana-app 6 | labels: 7 | app: banana 8 | spec: 9 | containers: 10 | - name: banana-app 11 | image: hashicorp/http-echo 12 | args: 13 | - "-text=banana" 14 | --- 15 | kind: Service 16 | apiVersion: v1 17 | metadata: 18 | name: banana-service 19 | spec: 20 | externalIPs: 21 | - 192.168.169.40 22 | selector: 23 | app: banana 24 | ports: 25 | # - name: http 26 | # port: 5678 27 | # targetPort: 5678 28 | - port: 5678 29 | type: LoadBalancer 30 | # type: ClusterIP 31 | # externalTrafficPolicy: Local 32 | # loadBalancerIP: 192.168.169.40 -------------------------------------------------------------------------------- /tests/cilium-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | registry: docker.io/cilium 3 | tag: v1.8.3 4 | k8sServiceHost: 192.168.42.199 5 | k8sServicePort: 6443 6 | tunnel: vxlan 7 | nativeRoutingCIDR: "10.90.0.0/16" 8 | externalIPs: 9 | enabled: true 10 | ipam: 11 | operator: 12 | clusterPoolIPv4PodCIDR: "10.90.0.0/16" 13 | clusterPoolIPv4MaskSize: 24 14 | containerRuntime: 15 | integration: containerd 16 | socketPath: /var/run/k3s/containerd/containerd.sock 17 | hubble: 18 | enabled: true 19 | ui: 20 | enabled: true 21 | relay: 22 | enabled: true 23 | metrics: 24 | enabled: 25 | - dns 26 | - drop 27 | - tcp 28 | - flow 29 | - port-distribution 30 | - icmp 31 | - http 32 | -------------------------------------------------------------------------------- /tests/home-assistant-values.yaml: -------------------------------------------------------------------------------- 1 | # nginx-ingress-values.yaml 
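# The values below share one MetalLB address between the home-assistant Service and
# its embedded VS Code Service by giving both the same loadBalancerIP and the same
# metallb.universe.tf/allow-shared-ip value. Stand-alone equivalent for reference
# (name, port and IP are placeholders):
# ---
# apiVersion: v1
# kind: Service
# metadata:
#   name: example-shared-ip
#   annotations:
#     metallb.universe.tf/allow-shared-ip: "home-assistant"
# spec:
#   type: LoadBalancer
#   loadBalancerIP: 172.16.20.102
#   ports:
#     - port: 8123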
2 | --- 3 | 4 | service: 5 | type: LoadBalancer 6 | loadBalancerIP: 172.16.20.102 7 | annotations: 8 | metallb.universe.tf/allow-shared-ip: "home-assistant" 9 | persistence: 10 | enabled: true 11 | storageClass: "longhorn" 12 | size: 250Mi 13 | vscode: 14 | enabled: true 15 | service: 16 | type: LoadBalancer 17 | loadBalancerIP: 172.16.20.102 18 | annotations: 19 | metallb.universe.tf/allow-shared-ip: "home-assistant" 20 | postgresql: 21 | enabled: true 22 | global: 23 | postgresql: 24 | postgresqlUsername: home-assistant 25 | postgresqlPassword: home-assistant-pass 26 | postgresqlDatabase: home-assistant 27 | persistence: 28 | enabled: true 29 | storageClass: "longhorn" 30 | size: 250Mi -------------------------------------------------------------------------------- /tests/kube-router.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | labels: 6 | k8s-app: kube-router 7 | tier: node 8 | name: kube-router 9 | namespace: kube-system 10 | spec: 11 | selector: 12 | matchLabels: 13 | k8s-app: kube-router 14 | tier: node 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kube-router 19 | tier: node 20 | spec: 21 | priorityClassName: system-node-critical 22 | serviceAccountName: kube-router 23 | containers: 24 | - name: kube-router 25 | image: docker.io/cloudnativelabs/kube-router:v1.0.1 26 | imagePullPolicy: IfNotPresent 27 | args: 28 | - "--run-router=true" 29 | - "--run-firewall=false" 30 | - "--run-service-proxy=false" 31 | - "--enable-cni=false" 32 | - "--enable-pod-egress=false" 33 | - "--enable-ibgp=true" 34 | - "--enable-overlay=true" 35 | - "--advertise-cluster-ip=true" 36 | - "--advertise-external-ip=true" 37 | - "--advertise-loadbalancer-ip=true" 38 | - "--peer-router-ips=192.168.42.1" 39 | - "--peer-router-asns=64512" 40 | - "--cluster-asn=64512" 41 | env: 42 | - name: NODE_NAME 43 | valueFrom: 44 | fieldRef: 45 | fieldPath: spec.nodeName 46 | livenessProbe: 47 | httpGet: 48 | path: /healthz 49 | port: 20244 50 | initialDelaySeconds: 10 51 | periodSeconds: 3 52 | resources: 53 | requests: 54 | cpu: 250m 55 | memory: 250Mi 56 | securityContext: 57 | privileged: true 58 | volumeMounts: 59 | - name: xtables-lock 60 | mountPath: /run/xtables.lock 61 | readOnly: false 62 | hostNetwork: true 63 | tolerations: 64 | - effect: NoSchedule 65 | operator: Exists 66 | - key: CriticalAddonsOnly 67 | operator: Exists 68 | - effect: NoExecute 69 | operator: Exists 70 | volumes: 71 | - name: xtables-lock 72 | hostPath: 73 | path: /run/xtables.lock 74 | type: FileOrCreate 75 | --- 76 | apiVersion: v1 77 | kind: ServiceAccount 78 | metadata: 79 | name: kube-router 80 | namespace: kube-system 81 | --- 82 | kind: ClusterRole 83 | apiVersion: rbac.authorization.k8s.io/v1beta1 84 | metadata: 85 | name: kube-router 86 | namespace: kube-system 87 | rules: 88 | - apiGroups: 89 | - "" 90 | resources: 91 | - namespaces 92 | - pods 93 | - services 94 | - nodes 95 | - endpoints 96 | verbs: 97 | - list 98 | - get 99 | - watch 100 | - apiGroups: 101 | - "networking.k8s.io" 102 | resources: 103 | - networkpolicies 104 | verbs: 105 | - list 106 | - get 107 | - watch 108 | - apiGroups: 109 | - extensions 110 | resources: 111 | - networkpolicies 112 | verbs: 113 | - get 114 | - list 115 | - watch 116 | --- 117 | kind: ClusterRoleBinding 118 | apiVersion: rbac.authorization.k8s.io/v1beta1 119 | metadata: 120 | name: kube-router 121 | roleRef: 122 | apiGroup: rbac.authorization.k8s.io 123 | kind: ClusterRole 124 
| name: kube-router 125 | subjects: 126 | - kind: ServiceAccount 127 | name: kube-router 128 | namespace: kube-system 129 | -------------------------------------------------------------------------------- /tests/longhorn-values.yaml: -------------------------------------------------------------------------------- 1 | # longhorn-values.yaml 2 | --- 3 | 4 | persistence: 5 | defaultClassReplicaCount: 1 6 | defaultSettings: 7 | defaultReplicaCount: 1 8 | createDefaultDiskLabeledNodes: true 9 | service: 10 | ui: 11 | type: NodePort 12 | nodePort: 30234 13 | # ingress: 14 | # enabled: true 15 | # annotations: 16 | # kubernetes.io/ingress.class: "nginx" 17 | # host: "longhorn.devbu.io" -------------------------------------------------------------------------------- /tests/metallb-values.yaml: -------------------------------------------------------------------------------- 1 | # metallb-values.yaml 2 | --- 3 | 4 | configInline: 5 | address-pools: 6 | - name: my-awesome-network 7 | protocol: layer2 8 | addresses: 9 | # - 172.16.20.100-172.16.20.110 10 | - 192.168.169.0/24 11 | # Cilium + Kube-router: 12 | # k delete ds/metallb-speaker -n kube-system 13 | -------------------------------------------------------------------------------- /tests/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | template: 10 | metadata: 11 | labels: 12 | app: nginx 13 | spec: 14 | nodeSelector: 15 | apptype: nginx 16 | containers: 17 | - name: nginx 18 | image: nginx:1 19 | ports: 20 | - name: http 21 | containerPort: 80 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: nginx-svc 27 | spec: 28 | ports: 29 | - name: http 30 | port: 80 31 | protocol: TCP 32 | targetPort: 80 33 | selector: 34 | app: nginx -------------------------------------------------------------------------------- /tests/nginx-ingress-values.yaml: -------------------------------------------------------------------------------- 1 | # nginx-ingress-values.yaml 2 | --- 3 | 4 | controller: 5 | service: 6 | type: LoadBalancer 7 | loadBalancerIP: 172.16.20.101 8 | -------------------------------------------------------------------------------- /tests/repos.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: source.toolkit.fluxcd.io/v1beta1 4 | kind: HelmRepository 5 | metadata: 6 | name: k8s-at-home 7 | namespace: gotk-system 8 | spec: 9 | interval: 1m 10 | url: https://k8s-at-home.com/charts/ 11 | --------------------------------------------------------------------------------
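# A HelmRepository like tests/repos.yaml above only becomes useful together with a
# HelmRelease that references it. Minimal illustrative sketch (chart, version and
# values are placeholders taken loosely from TEST.md; assumes the Flux v2
# helm-controller is installed):
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: home-assistant
  namespace: default
spec:
  interval: 5m
  chart:
    spec:
      chart: home-assistant
      version: "2.1.0"
      sourceRef:
        kind: HelmRepository
        name: k8s-at-home
        namespace: gotk-system
  values:
    service:
      type: LoadBalancer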