├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── LICENSE ├── README.md ├── config └── nginx.conf.tftpl ├── docs └── roll-node-pools.md ├── example ├── README.md └── main.tf ├── master_nodes.tf ├── outputs.tf ├── scripts ├── install-k3s-server.sh.tftpl └── install-support-apps.sh.tftpl ├── support_node.tf ├── variables.tf ├── versions.tf └── worker_nodes.tf /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | 11 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 12 | # .tfvars files are managed as part of configuration and so should be included in 13 | # version control. 
14 | # 15 | # example.tfvars 16 | 17 | # Ignore override files as they are usually used to override resources locally and so 18 | # are not checked in 19 | override.tf 20 | override.tf.json 21 | *_override.tf 22 | *_override.tf.json 23 | 24 | # Include override files you do wish to add to version control using negated pattern 25 | # 26 | # !example_override.tf 27 | 28 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 29 | # example: *tfplan* 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Frank 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-proxmox-k3s 2 | 3 | A module for spinning up an expandable and flexible K3s cluster for your HomeLab. 4 | 5 | ## Features 6 | 7 | - Fully automated. No need to remote into a VM, not even for the kubeconfig 8 | - Built-in, automatically configured external load balancer (for both the K3s API and ingress) 9 | - Static(ish) MAC addresses for reproducible DHCP reservations 10 | - Node pools to easily scale and to handle many kinds of workloads 11 | - Pure Terraform - no Ansible needed. 12 | 13 | ## Prerequisites 14 | 15 | - A Proxmox node with sufficient capacity for all nodes 16 | - A cloneable VM or template that supports cloud-init, is based on Debian (ideally Ubuntu Server), 17 | and whose disk does not exceed the smallest node disk size (currently 10G); a sketch of creating one follows below 18 | - Two CIDR ranges for master and worker nodes that are NOT handed out by DHCP (nodes are 19 | configured with static IPs from these ranges) 20 | 21 | ## Usage 22 | 23 | > Take a look at the complete auto-generated docs on the 24 | [Official Registry Page](https://registry.terraform.io/modules/fvumbaca/k3s/proxmox/latest).
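Before filling in the module block below, make sure the template that `node_template` points at actually exists on the Proxmox node. A minimal sketch of preparing one on the Proxmox host, assuming an Ubuntu 20.04 cloud image, VM ID `9000`, and `local-lvm` storage (all of these are placeholders; adjust them for your environment):

```sh
# Download a Debian-based cloud image (Ubuntu Server is the ideal base).
wget https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img

# Create an empty VM and import the image as its boot disk.
qm create 9000 --name ubuntu-template --memory 2048 --net0 virtio,bridge=vmbr0 --agent 1
qm importdisk 9000 focal-server-cloudimg-amd64.img local-lvm
qm set 9000 --scsihw virtio-scsi-pci --scsi0 local-lvm:vm-9000-disk-0

# Attach a cloud-init drive and boot from the imported disk.
qm set 9000 --ide2 local-lvm:cloudinit --boot c --bootdisk scsi0 --serial0 socket --vga serial0

# Convert the VM into a template that the module can clone.
qm template 9000
```

Because the module sets `agent = 1` on every clone, the image should also have the `qemu-guest-agent` package installed (for example with `virt-customize -a <image> --install qemu-guest-agent`) before it is converted into a template.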
25 | 26 | ```terraform 27 | module "k3s" { 28 | source = "fvumbaca/k3s/proxmox" 29 | version = ">= 0.0.0, < 1.0.0" # Get latest 0.X release 30 | 31 | authorized_keys_file = "authorized_keys" 32 | 33 | proxmox_node = "my-proxmox-node" 34 | 35 | node_template = "ubuntu-template" 36 | proxmox_resource_pool = "my-k3s" 37 | 38 | network_gateway = "192.168.0.1" 39 | lan_subnet = "192.168.0.0/24" 40 | 41 | support_node_settings = { 42 | cores = 2 43 | memory = 4096 44 | } 45 | 46 | # Disable default traefik and servicelb installs for metallb and traefik 2 47 | k3s_disable_components = [ 48 | "traefik", 49 | "servicelb" 50 | ] 51 | 52 | master_nodes_count = 2 53 | master_node_settings = { 54 | cores = 2 55 | memory = 4096 56 | } 57 | 58 | # 192.168.0.200 -> 192.168.0.207 (6 available IPs for nodes) 59 | control_plane_subnet = "192.168.0.200/29" 60 | 61 | node_pools = [ 62 | { 63 | name = "default" 64 | size = 2 65 | # 192.168.0.208 -> 192.168.0.223 (14 available IPs for nodes) 66 | subnet = "192.168.0.208/28" 67 | } 68 | ] 69 | } 70 | ``` 71 | 72 | ### Retrieve Kubeconfig 73 | 74 | To get the kubeconfig for your new K3s cluster, first make sure to forward the module 75 | output in your project's outputs: 76 | 77 | ```terraform 78 | output "kubeconfig" { 79 | # Update module name. Here we are using 'k3s' 80 | value = module.k3s.k3s_kubeconfig 81 | sensitive = true 82 | } 83 | ``` 84 | 85 | Finally, output the config file: 86 | 87 | ```sh 88 | terraform output -raw kubeconfig > config.yaml 89 | # Test out the config: 90 | kubectl --kubeconfig config.yaml get nodes 91 | ``` 92 | 93 | > Make sure your support node is routable from the computer you are running the 94 | command on! 95 | 96 | ## Runbooks and Documents 97 | 98 | - [Basic cluster example](example) 99 | - [How to roll (update) your nodes](docs/roll-node-pools.md) 100 | 101 | ## Why use nodepools and subnets? 102 | 103 | This module is designed with node pools and subnets to allow for changes to the 104 | cluster composition in the future. If, later on, you want to add another master 105 | or worker node, you can do so without needing to tear down or modify existing 106 | nodes. Node pools are key if you plan to support nodes with different 107 | capabilities in the future without impacting other nodes. 108 | 109 | ## Todo 110 | 111 | - [ ] Add variable to allow workloads on master nodes 112 | -------------------------------------------------------------------------------- /config/nginx.conf.tftpl: -------------------------------------------------------------------------------- 1 | load_module /usr/lib/nginx/modules/ngx_stream_module.so; 2 | 3 | events { 4 | worker_connections 786; 5 | } 6 | 7 | stream { 8 | upstream k3s_servers { 9 | %{ for host in k3s_server_hosts ~} 10 | server ${host}; 11 | %{ endfor ~} 12 | } 13 | 14 | server { 15 | listen 6443; 16 | proxy_pass k3s_servers; 17 | } 18 | 19 | upstream k3s_nodes_ingress_http { 20 | %{ for ip in k3s_nodes ~} 21 | server ${ip}:80; 22 | %{ endfor ~} 23 | } 24 | 25 | upstream k3s_nodes_ingress_https { 26 | %{ for ip in k3s_nodes ~} 27 | server ${ip}:443; 28 | %{ endfor ~} 29 | } 30 | 31 | server { 32 | listen 80; 33 | proxy_pass k3s_nodes_ingress_http; 34 | } 35 | 36 | server { 37 | listen 443; 38 | proxy_pass k3s_nodes_ingress_https; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /docs/roll-node-pools.md: -------------------------------------------------------------------------------- 1 | # How to Roll Node Pools 2 | 3 | ## Spin up New Nodes 4 | 5 | ```terraform 6 | // ....
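// Note on the entries below: they live in the module's `node_pools` list
// (module "k3s" { ... node_pools = [ ... ] }, as shown in the README usage
// example). Pool names must be unique, since worker VM names are derived
// from them, and each pool's subnet should be outside your DHCP range and
// must not overlap the control-plane subnet or another pool's subnet.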
7 | // This is the old node pool: 8 | { 9 | name = "original" 10 | size = 2 11 | subnet = "192.168.0.208/28" # 14 ips 12 | }, 13 | // Add the new, updated node pool like below: 14 | { 15 | name = "new-pool" 16 | size = 2 17 | subnet = "192.168.0.224/28" # 14 ips 18 | // You probably want to set this to change the template from the old one 19 | // that is being used on the original node pool. 20 | template = "new-proxmox-node-template" 21 | }, 22 | // ... 23 | ``` 24 | 25 | Once you have made your changes, you will need to run `terraform apply`. Note 26 | that this change should be purely additive. 27 | 28 | > On a constrained system, you might only be able to spin up 1 node from the 29 | new nodepool. This is OK, but you might need to revisit this step to shrink the 30 | original pool and grow the new pool to fully roll all your workloads over. 31 | 32 | If everything was applied correctly, you should now see the node(s) from the 33 | new pool we just added available in the cluster: 34 | 35 | ```sh 36 | kubectl get nodes 37 | ``` 38 | 39 | ## Cordon Old Nodes 40 | 41 | To begin moving workloads over, first cordon all of the nodes from the original 42 | node pool. 43 | 44 | ```sh 45 | # Note that you will need to change the regex to match nodes from your original node pool 46 | kubectl get nodes | grep -o "k3s-original-." | xargs kubectl cordon 47 | ``` 48 | 49 | Just to validate, check that only the old nodes have the status of `SchedulingDisabled`. 50 | 51 | ```sh 52 | kubectl get nodes 53 | ``` 54 | 55 | Now any restarted workloads will no longer start on the original node pool. 56 | 57 | ## Test New Nodes (Optional) 58 | 59 | If it is important to minimize all possible downtime for the workloads on your 60 | cluster, you may want to run a canary deployment to make sure workloads being 61 | moved over will not instantly crash. For homelabs, this is not usually a 62 | concern and this step can be skipped. It is always easy to revert the change 63 | with a call to `kubectl uncordon`. 64 | 65 | ## Restart Workloads 66 | 67 | At this point, we need to move workloads over to the new nodes. Depending on 68 | your system, some may have already started moving over. The best way to 69 | __drain__ nodes completely is to __evict__ workloads with `kubectl drain`: 70 | 71 | > **NOTE:** This will delete all the data from workloads not configured with 72 | persistent volumes. If you are not sure if your workloads are configured 73 | correctly, do not continue. 74 | 75 | ```sh 76 | # Don't forget to update the regex with your original pool name! 77 | kubectl get nodes | grep -o "k3s-original-." | xargs \ 78 | kubectl drain --ignore-errors --ignore-daemonsets --delete-emptydir-data 79 | ``` 80 | 81 | > At the time of writing this document, running the `kubectl drain` command 82 | without `--ignore-errors` is deprecated behavior in favor of ignoring errors by 83 | default. Passing the flag ensures that the command will not exit early if one of 84 | the first nodes encounters an error when draining. 85 | 86 | At this point, all your workloads (except for daemon sets, since they by design 87 | run on every node all the time) should be moved over to your new node pool. A 88 | measure I like to take (on smaller clusters), just to be 100% sure, is to list 89 | all the pods and visually inspect the output to make sure we don't have any stragglers. 90 | 91 | ```sh 92 | kubectl get pods -A -o wide 93 | ``` 94 | 95 | Note that it's okay to have some pods still on the drained nodes - just make 96 | sure they are pods from a daemon set.
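If the full listing is noisy, the same check can be narrowed to just the old nodes. A small sketch, assuming the original pool is named `original` as in the examples above:

```sh
# List only the pods still bound to nodes from the original pool.
# Adjust "k3s-original-" to match your cluster and pool name.
for node in $(kubectl get nodes -o name | grep "k3s-original-"); do
  echo "--- ${node#node/} ---"
  kubectl get pods -A -o wide --field-selector spec.nodeName="${node#node/}"
done
```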
97 | 98 | ## Destroy Old Nodes 99 | 100 | Once we are happy with the state of the rollover, we are finally able to 101 | destroy the old node pool and the nodes that make it up. To do that, just 102 | delete the node pool from the list in your terraform file and then run the good 103 | old: 104 | 105 | ```sh 106 | terraform apply 107 | ``` 108 | 109 | The deleted nodes might still be showing up in `kubectl` with a status of 110 | `NotReady`. If this is the case, clean them up with: 111 | 112 | ```sh 113 | # Don't forget to update the regex with your original pool name! 114 | kubectl get nodes | grep -o "k3s-original-." | xargs kubectl delete node 115 | # Sometimes this command hangs for a while waiting for the api to clean up. 116 | # Skip the waiting with Ctrl + C once all the nodes have been logged as deleted 117 | ``` 118 | 119 | Congrats! You have now successfully rolled your cluster's nodes! 120 | 121 | -------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | # Proxmox/K3s Example 2 | 3 | This is an example project for setting up your own K3s cluster at home. 4 | 5 | ## Summary 6 | 7 | ### VMs 8 | 9 | This will spin up: 10 | 11 | - 1 support VM with the API load balancer and K3s database, with 2 cores and 4GB mem 12 | - 2 master nodes, each with 2 cores and 4GB mem 13 | - 1 node pool with 2 worker nodes, each having 2 cores and 4GB mem 14 | 15 | ### Networking 16 | 17 | - The support VM will be spun up on `192.168.0.200` 18 | - The master VMs will be spun up on `192.168.0.201` and `192.168.0.202` 19 | - The worker VMs will be spun up on `192.168.0.208` and `192.168.0.209` 20 | 21 | > Note: To eliminate potential IP clashing with existing computers on your 22 | network, it is **STRONGLY** recommended that you take IPs `192.168.0.200` - 23 | `192.168.0.254` out of your DHCP server's rotation. Otherwise, other computers 24 | in your network may already be using these IPs and that will create conflicts! 25 | Check your router's manual or search online for a step-by-step guide. 26 | 27 | ## Usage 28 | 29 | To run this example, make sure you `cd` to this directory in your terminal, 30 | then: 31 | 1. Copy your public key to the `authorized_keys` file. In most cases, you 32 | should be able to do this by running 33 | `cat ~/.ssh/id_rsa.pub > authorized_keys`. 34 | 2. Find your Proxmox API URL. It should look something like 35 | `https://192.168.0.25:8006/api2/json`. Once you have found it, update the value 36 | in the `main.tf` file marked as `TODO` in the `provider proxmox` section. 37 | 3. Authenticate to the Proxmox API **for the current terminal session** by setting these two environment variables: 38 | ``` 39 | # Update these to be your Proxmox user/password. 40 | # Note that you usually need to keep the realm (e.g. @pam or @pve) at the end of the user. 41 | export PM_USER="terraform-prov@pve" 42 | export PM_PASS="password" 43 | ``` 44 | 45 | > Find other ways to authenticate to Proxmox by reading [the provider's docs](https://github.com/Telmate/terraform-provider-proxmox/blob/master/docs/index.md). 46 | 4. Run `terraform init` (only needs to be done the first time) 47 | 5. Run `terraform apply` 48 | 6. Review the plan. Make sure it is doing what you expect! 49 | 7. Enter `yes` in the prompt and wait for your cluster to spin up. 50 | 8. Retrieve your kubeconfig by running 51 | `terraform output -raw kubeconfig > config.yaml` 52 | 9. 
Make all your `kubectl` commands work with your cluster for your terminal 53 | session by running `export KUBECONFIG="config.yaml"`. If you want to add the 54 | context more permanently and globally, [refer to the document on managing Kubernetes configs](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#create-a-second-configuration-file). 55 | 56 | -------------------------------------------------------------------------------- /example/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | proxmox = { 4 | source = "Telmate/proxmox" 5 | version = "2.9.3" 6 | } 7 | 8 | macaddress = { 9 | source = "ivoronin/macaddress" 10 | version = "0.3.0" 11 | } 12 | } 13 | } 14 | 15 | provider proxmox { 16 | pm_log_enable = true 17 | pm_log_file = "terraform-plugin-proxmox.log" 18 | pm_debug = true 19 | pm_log_levels = { 20 | _default = "debug" 21 | _capturelog = "" 22 | } 23 | 24 | ## TODO: Update these for your specific setup 25 | pm_api_url = "https://192.168.0.25:8006/api2/json" 26 | } 27 | 28 | module "k3s" { 29 | source = "fvumbaca/k3s/proxmox" 30 | version = ">= 0.0.0, < 1" # Get latest 0.X release 31 | 32 | authorized_keys_file = "authorized_keys" 33 | 34 | proxmox_node = "my-proxmox-node" 35 | 36 | node_template = "ubuntu-template" 37 | proxmox_resource_pool = "my-k3s" 38 | 39 | network_gateway = "192.168.0.1" 40 | lan_subnet = "192.168.0.0/24" 41 | 42 | support_node_settings = { 43 | cores = 2 44 | memory = 4096 45 | } 46 | 47 | master_nodes_count = 2 48 | master_node_settings = { 49 | cores = 2 50 | memory = 4096 51 | } 52 | 53 | # 192.168.0.200 -> 192.168.0.207 (6 available IPs for nodes) 54 | control_plane_subnet = "192.168.0.200/29" 55 | 56 | node_pools = [ 57 | { 58 | name = "default" 59 | size = 2 60 | # 192.168.0.208 -> 192.168.0.223 (14 available IPs for nodes) 61 | subnet = "192.168.0.208/28" 62 | } 63 | ] 64 | } 65 | 66 | output "kubeconfig" { 67 | value = module.k3s.k3s_kubeconfig 68 | sensitive = true 69 | } 70 | 71 | -------------------------------------------------------------------------------- /master_nodes.tf: -------------------------------------------------------------------------------- 1 | resource "macaddress" "k3s-masters" { 2 | count = var.master_nodes_count 3 | } 4 | 5 | locals { 6 | master_node_settings = defaults(var.master_node_settings, { 7 | cores = 2 8 | sockets = 1 9 | memory = 4096 10 | storage_type = "scsi" 11 | storage_id = "local-lvm" 12 | disk_size = "20G" 13 | user = "k3s" 14 | network_bridge = "vmbr0" 15 | network_tag = -1 16 | }) 17 | 18 | master_node_ips = [for i in range(var.master_nodes_count) : cidrhost(var.control_plane_subnet, i + 1)] 19 | } 20 | 21 | resource "random_password" "k3s-server-token" { 22 | length = 32 23 | special = false 24 | override_special = "_%@" 25 | } 26 | 27 | resource "proxmox_vm_qemu" "k3s-master" { 28 | depends_on = [ 29 | proxmox_vm_qemu.k3s-support, 30 | ] 31 | 32 | count = var.master_nodes_count 33 | target_node = var.proxmox_node 34 | name = "${var.cluster_name}-master-${count.index}" 35 | 36 | clone = var.node_template 37 | 38 | pool = var.proxmox_resource_pool 39 | 40 | # cores = 2 41 | cores = local.master_node_settings.cores 42 | sockets = local.master_node_settings.sockets 43 | memory = local.master_node_settings.memory 44 | 45 | agent = 1 46 | 47 | disk { 48 | type = local.master_node_settings.storage_type 49 | storage = local.master_node_settings.storage_id 50 | size = 
local.master_node_settings.disk_size 51 | } 52 | 53 | network { 54 | bridge = local.master_node_settings.network_bridge 55 | firewall = true 56 | link_down = false 57 | macaddr = upper(macaddress.k3s-masters[count.index].address) 58 | model = "virtio" 59 | queues = 0 60 | rate = 0 61 | tag = local.master_node_settings.network_tag 62 | } 63 | 64 | lifecycle { 65 | ignore_changes = [ 66 | ciuser, 67 | sshkeys, 68 | disk, 69 | network 70 | ] 71 | } 72 | 73 | os_type = "cloud-init" 74 | 75 | ciuser = local.master_node_settings.user 76 | 77 | ipconfig0 = "ip=${local.master_node_ips[count.index]}/${local.lan_subnet_cidr_bitnum},gw=${var.network_gateway}" 78 | 79 | sshkeys = file(var.authorized_keys_file) 80 | 81 | nameserver = var.nameserver 82 | 83 | connection { 84 | type = "ssh" 85 | user = local.master_node_settings.user 86 | host = local.master_node_ips[count.index] 87 | } 88 | 89 | provisioner "remote-exec" { 90 | inline = [ 91 | templatefile("${path.module}/scripts/install-k3s-server.sh.tftpl", { 92 | mode = "server" 93 | tokens = [random_password.k3s-server-token.result] 94 | alt_names = concat([local.support_node_ip], var.api_hostnames) 95 | server_hosts = [] 96 | node_taints = ["CriticalAddonsOnly=true:NoExecute"] 97 | disable = var.k3s_disable_components 98 | datastores = [{ 99 | host = "${local.support_node_ip}:3306" 100 | name = "k3s" 101 | user = "k3s" 102 | password = random_password.k3s-master-db-password.result 103 | }] 104 | 105 | http_proxy = var.http_proxy 106 | }) 107 | ] 108 | } 109 | } 110 | 111 | data "external" "kubeconfig" { 112 | depends_on = [ 113 | proxmox_vm_qemu.k3s-support, 114 | proxmox_vm_qemu.k3s-master 115 | ] 116 | 117 | program = [ 118 | "/usr/bin/ssh", 119 | "-o UserKnownHostsFile=/dev/null", 120 | "-o StrictHostKeyChecking=no", 121 | "${local.master_node_settings.user}@${local.master_node_ips[0]}", 122 | "echo '{\"kubeconfig\":\"'$(sudo cat /etc/rancher/k3s/k3s.yaml | base64)'\"}'" 123 | ] 124 | } 125 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "k3s_db_password" { 3 | value = random_password.k3s-master-db-password.result 4 | sensitive = true 5 | } 6 | 7 | output "k3s_db_name" { 8 | value = local.support_node_settings.db_name 9 | } 10 | 11 | output "k3s_db_user" { 12 | value = local.support_node_settings.db_user 13 | } 14 | 15 | output "k3s_db_host" { 16 | value = "${local.support_node_ip}:3306" 17 | } 18 | 19 | output "root_db_password" { 20 | value = random_password.support-db-password.result 21 | sensitive = true 22 | } 23 | 24 | output "support_node_ip" { 25 | value = local.support_node_ip 26 | } 27 | 28 | output "support_node_user" { 29 | value = local.support_node_settings.user 30 | } 31 | 32 | output "master_node_ips" { 33 | value = local.master_node_ips 34 | } 35 | 36 | output "k3s_server_token" { 37 | value = random_password.k3s-server-token.result 38 | sensitive = true 39 | } 40 | 41 | output "k3s_master_node_ips" { 42 | value = local.master_node_ips 43 | } 44 | 45 | output "k3s_kubeconfig" { 46 | value = replace(base64decode(replace(data.external.kubeconfig.result.kubeconfig, " ", "")), "server: https://127.0.0.1:6443", "server: https://${local.support_node_ip}:6443") 47 | sensitive = true 48 | } 49 | 50 | -------------------------------------------------------------------------------- /scripts/install-k3s-server.sh.tftpl: 
-------------------------------------------------------------------------------- 1 | export HTTP_PROXY="${http_proxy}" 2 | export HTTPS_PROXY="${http_proxy}" 3 | export http_proxy="${http_proxy}" 4 | export https_proxy="${http_proxy}" 5 | 6 | curl -sfL https://get.k3s.io | sh -s - ${mode} \ 7 | %{ for component in disable ~} 8 | --disable ${component} \ 9 | %{ endfor ~} 10 | %{ for host in server_hosts ~} 11 | --server "${host}" \ 12 | %{ endfor ~} 13 | %{ for alt in alt_names ~} 14 | --tls-san "${alt}" \ 15 | %{ endfor ~} 16 | %{ for token in tokens ~} 17 | --token "${token}" \ 18 | %{ endfor ~} 19 | %{ for taint in node_taints ~} 20 | --node-taint "${ taint }" \ 21 | %{ endfor ~} 22 | %{ for db in datastores ~} 23 | --datastore-endpoint="mysql://${db.user}:${db.password}@tcp(${db.host})/${db.name}" \ 24 | %{ endfor ~} 25 | --log $HOME/.k3s-install-log.txt 26 | -------------------------------------------------------------------------------- /scripts/install-support-apps.sh.tftpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MARIADB_ROOT_PASSWORD="${root_password}" 4 | MARIADB_K3S_DATABASE="${k3s_database}" 5 | MARIADB_K3S_USER="${k3s_user}" 6 | MARIADB_K3S_PASSWORD="${k3s_password}" 7 | 8 | export HTTP_PROXY="${http_proxy}" 9 | export HTTPS_PROXY="${http_proxy}" 10 | export http_proxy="${http_proxy}" 11 | export https_proxy="${http_proxy}" 12 | 13 | mariadb() { 14 | sudo -E apt install mariadb-server -y 15 | 16 | # Make mariadb listen to all remote requests 17 | sudo sed -i -e 's/\(bind-address\s*=\s*\)[0-9.]*/\10.0.0.0/g' /etc/mysql/mariadb.conf.d/50-server.cnf 18 | 19 | # Replicate mysql_secure_installation script 20 | sudo mariadb -e "UPDATE mysql.user SET Password=PASSWORD('$MARIADB_ROOT_PASSWORD') WHERE User='root'" 21 | sudo mariadb -e "DELETE FROM mysql.user WHERE User=''" 22 | sudo mariadb -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')" 23 | sudo mariadb -e "DELETE FROM mysql.db WHERE Db='test' OR Db='test\_%'" 24 | 25 | # Setup db and user for k3s 26 | sudo mariadb -e "CREATE DATABASE IF NOT EXISTS $MARIADB_K3S_DATABASE" 27 | sudo mariadb -e "CREATE USER IF NOT EXISTS $MARIADB_K3S_USER IDENTIFIED BY '$MARIADB_K3S_PASSWORD'" 28 | sudo mariadb -e "GRANT ALL ON $MARIADB_K3S_DATABASE.* to $MARIADB_K3S_USER IDENTIFIED BY '$MARIADB_K3S_PASSWORD'" 29 | 30 | # Flush and restart db 31 | sudo mysql -e "FLUSH PRIVILEGES" 32 | sudo systemctl restart mariadb 33 | } 34 | 35 | nginx() { 36 | sudo -E apt install nginx -y 37 | } 38 | 39 | mariadb 40 | nginx 41 | -------------------------------------------------------------------------------- /support_node.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "macaddress" "k3s-support" {} 3 | 4 | locals { 5 | support_node_settings = defaults(var.support_node_settings, { 6 | cores = 2 7 | sockets = 1 8 | memory = 4096 9 | 10 | 11 | storage_type = "scsi" 12 | storage_id = "local-lvm" 13 | disk_size = "10G" 14 | user = "support" 15 | network_tag = -1 16 | 17 | 18 | 19 | db_name = "k3s" 20 | db_user = "k3s" 21 | 22 | 23 | 24 | network_bridge = "vmbr0" 25 | }) 26 | 27 | support_node_ip = cidrhost(var.control_plane_subnet, 0) 28 | } 29 | 30 | locals { 31 | lan_subnet_cidr_bitnum = split("/", var.lan_subnet)[1] 32 | } 33 | 34 | resource "proxmox_vm_qemu" "k3s-support" { 35 | target_node = var.proxmox_node 36 | name = join("-", [var.cluster_name, "support"]) 37 | 38 | clone = var.node_template 39 | 40 
| pool = var.proxmox_resource_pool 41 | 42 | # cores = 2 43 | cores = local.support_node_settings.cores 44 | sockets = local.support_node_settings.sockets 45 | memory = local.support_node_settings.memory 46 | 47 | 48 | agent = 1 49 | disk { 50 | type = local.support_node_settings.storage_type 51 | storage = local.support_node_settings.storage_id 52 | size = local.support_node_settings.disk_size 53 | } 54 | 55 | network { 56 | bridge = local.support_node_settings.network_bridge 57 | firewall = true 58 | link_down = false 59 | macaddr = upper(macaddress.k3s-support.address) 60 | model = "virtio" 61 | queues = 0 62 | rate = 0 63 | tag = local.support_node_settings.network_tag 64 | } 65 | 66 | lifecycle { 67 | ignore_changes = [ 68 | ciuser, 69 | sshkeys, 70 | disk, 71 | network 72 | ] 73 | } 74 | 75 | os_type = "cloud-init" 76 | 77 | ciuser = local.support_node_settings.user 78 | 79 | ipconfig0 = "ip=${local.support_node_ip}/${local.lan_subnet_cidr_bitnum},gw=${var.network_gateway}" 80 | 81 | sshkeys = file(var.authorized_keys_file) 82 | 83 | nameserver = var.nameserver 84 | 85 | connection { 86 | type = "ssh" 87 | user = local.support_node_settings.user 88 | host = local.support_node_ip 89 | } 90 | 91 | provisioner "file" { 92 | destination = "/tmp/install.sh" 93 | content = templatefile("${path.module}/scripts/install-support-apps.sh.tftpl", { 94 | root_password = random_password.support-db-password.result 95 | 96 | k3s_database = local.support_node_settings.db_name 97 | k3s_user = local.support_node_settings.db_user 98 | k3s_password = random_password.k3s-master-db-password.result 99 | 100 | http_proxy = var.http_proxy 101 | }) 102 | } 103 | 104 | provisioner "remote-exec" { 105 | inline = [ 106 | "chmod u+x /tmp/install.sh", 107 | "/tmp/install.sh", 108 | "rm -r /tmp/install.sh", 109 | ] 110 | } 111 | } 112 | 113 | resource "random_password" "support-db-password" { 114 | length = 16 115 | special = false 116 | override_special = "_%@" 117 | } 118 | 119 | resource "random_password" "k3s-master-db-password" { 120 | length = 16 121 | special = false 122 | override_special = "_%@" 123 | } 124 | 125 | resource "null_resource" "k3s_nginx_config" { 126 | 127 | depends_on = [ 128 | proxmox_vm_qemu.k3s-support 129 | ] 130 | 131 | triggers = { 132 | config_change = filemd5("${path.module}/config/nginx.conf.tftpl") 133 | } 134 | 135 | connection { 136 | type = "ssh" 137 | user = local.support_node_settings.user 138 | host = local.support_node_ip 139 | } 140 | 141 | provisioner "file" { 142 | destination = "/tmp/nginx.conf" 143 | content = templatefile("${path.module}/config/nginx.conf.tftpl", { 144 | k3s_server_hosts = [for ip in local.master_node_ips : 145 | "${ip}:6443" 146 | ] 147 | k3s_nodes = concat(local.master_node_ips, [ 148 | for node in local.listed_worker_nodes : 149 | node.ip 150 | ]) 151 | }) 152 | } 153 | 154 | provisioner "remote-exec" { 155 | inline = [ 156 | "sudo mv /tmp/nginx.conf /etc/nginx/nginx.conf", 157 | "sudo systemctl restart nginx.service", 158 | ] 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "proxmox_node" { 2 | description = "Proxmox node to create VMs on." 3 | type = string 4 | } 5 | 6 | variable "authorized_keys_file" { 7 | description = "Path to file containing public SSH keys for remoting into nodes." 
8 | type = string 9 | } 10 | 11 | variable "network_gateway" { 12 | description = "IP address of the network gateway." 13 | type = string 14 | validation { 15 | # condition = can(regex("^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}/[0-9]{1,2}$", var.network_gateway)) 16 | condition = can(regex("^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$", var.network_gateway)) 17 | error_message = "The network_gateway value must be a valid ip." 18 | } 19 | } 20 | 21 | variable "lan_subnet" { 22 | description = < node 31 | } 32 | 33 | } 34 | 35 | resource "proxmox_vm_qemu" "k3s-worker" { 36 | depends_on = [ 37 | proxmox_vm_qemu.k3s-support, 38 | proxmox_vm_qemu.k3s-master, 39 | ] 40 | 41 | for_each = local.mapped_worker_nodes 42 | 43 | target_node = var.proxmox_node 44 | name = "${var.cluster_name}-${each.key}" 45 | 46 | clone = each.value.template 47 | 48 | pool = var.proxmox_resource_pool 49 | 50 | # cores = 2 51 | cores = each.value.cores 52 | sockets = each.value.sockets 53 | memory = each.value.memory 54 | 55 | agent = 1 56 | 57 | disk { 58 | type = each.value.storage_type 59 | storage = each.value.storage_id 60 | size = each.value.disk_size 61 | } 62 | 63 | network { 64 | bridge = each.value.network_bridge 65 | firewall = true 66 | link_down = false 67 | macaddr = upper(macaddress.k3s-workers[each.key].address) 68 | model = "virtio" 69 | queues = 0 70 | rate = 0 71 | tag = each.value.network_tag 72 | } 73 | 74 | lifecycle { 75 | ignore_changes = [ 76 | ciuser, 77 | sshkeys, 78 | disk, 79 | network 80 | ] 81 | } 82 | 83 | os_type = "cloud-init" 84 | 85 | ciuser = each.value.user 86 | 87 | ipconfig0 = "ip=${each.value.ip}/${local.lan_subnet_cidr_bitnum},gw=${var.network_gateway}" 88 | 89 | sshkeys = file(var.authorized_keys_file) 90 | 91 | nameserver = var.nameserver 92 | 93 | connection { 94 | type = "ssh" 95 | user = each.value.user 96 | host = each.value.ip 97 | } 98 | 99 | provisioner "remote-exec" { 100 | inline = [ 101 | templatefile("${path.module}/scripts/install-k3s-server.sh.tftpl", { 102 | mode = "agent" 103 | tokens = [random_password.k3s-server-token.result] 104 | alt_names = [] 105 | disable = [] 106 | server_hosts = ["https://${local.support_node_ip}:6443"] 107 | node_taints = each.value.taints 108 | datastores = [] 109 | 110 | http_proxy = var.http_proxy 111 | }) 112 | ] 113 | } 114 | 115 | } 116 | --------------------------------------------------------------------------------