├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── controller-values.yaml.template ├── controller ├── .helmignore ├── Chart.yaml ├── templates │ └── .gitkeep └── values.yaml ├── cp-kubeconf.sh ├── main.tf ├── scripts ├── k3s-management-additional.sh ├── k3s-management.sh └── k3s-worker.sh ├── terraform.tfvars.example └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | *.tfstate* 2 | *.tfvars 3 | .terraform/ 4 | 5 | controller-values.yaml 6 | kube_config.yaml -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "controller/charts/hcloud-csi-driver"] 2 | path = controller/charts/hcloud-csi-driver 3 | url = https://gitlab.com/MatthiasLohr/hcloud-csi-driver-helm-chart.git 4 | [submodule "controller/charts/hcloud-cloud-controller-manager"] 5 | path = controller/charts/hcloud-cloud-controller-manager 6 | url = https://gitlab.com/MatthiasLohr/hcloud-cloud-controller-manager-helm-chart.git 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Alexander Zimmermann 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # K3S on Hetzner Cloud 2 | 3 | This repository contains a Terraform template to spin up a k3s cluster on Hetzner Cloud. 4 | It also provides a Helm-Chart which contains two Subcharts for installing the [hcloud-cloud-controller-manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and the [hcloud-csi-driver](https://github.com/hetznercloud/csi-driver). 5 | Thanks to [Matthias Lohr](https://gitlab.com/MatthiasLohr) for providing the Helm Charts. 6 | 7 | ## Usage 8 | 9 | ### Cluster Provisioning 10 | 11 | 1. Copy the `terraform.tfvars.example` to `terraform.tfvars` and fill in the required information 12 | 2. Take a look at the `variables.tf` to check if you want to add more config 13 | 3. Run `terraform init` and `terraform plan`. Check if everything seems ok 14 | 4. Run `terraform apply` and let Terraform do the deployment 15 | 5. Get your kubeconfig by running `chmod +x ./cp-kubeconf.sh` followed by `./cp-kubeconf.sh ` 16 | 17 | ### Add Hetzner Cloud Integrations 18 | 19 | The attached Helm Chart can install all necessary components to provide a tight integration with Hetzner Cloud. 20 | 21 | 1. Initialize the submodules by running `git submodule update --init --recursive` 22 | 2. Install the Helm-Chart, using e.g. 
`helm --kubeconfig ./kube_config.yaml upgrade --namespace kube-system --install -f ./controller-values.yaml setup ./controller` 23 | * If you choose another command for helm installation, please name your Helm deployment `setup` or change the variable `hcloud-csi-driver.csiDriver.secret.name` 24 | 3. Check if all components get up 25 | -------------------------------------------------------------------------------- /controller-values.yaml.template: -------------------------------------------------------------------------------- 1 | hcloud-cloud-controller-manager: 2 | manager: 3 | secret: 4 | hcloudApiToken: "${api_token}" 5 | privateNetwork: 6 | id: "${private_network_id}" -------------------------------------------------------------------------------- /controller/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /controller/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: hcloud-controller 3 | description: Helm chart for initializing a cluster with Hetzner Cloud Controllers 4 | type: application 5 | version: 1.0.0 6 | appVersion: 1.0.0 7 | -------------------------------------------------------------------------------- /controller/templates/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alexzimmer96/k3s-hcloud/6d0b29ba72d4493831b664fdffd2b7227ded6645/controller/templates/.gitkeep -------------------------------------------------------------------------------- /controller/values.yaml: -------------------------------------------------------------------------------- 1 | hcloud-cloud-controller-manager: 2 | manager: 3 | secret: 4 | create: true 5 | hcloudApiToken: "" 6 | privateNetwork: 7 | enabled: true 8 | clusterSubnet: "172.16.0.0/12" 9 | id: "" 10 | 11 | hcloud-csi-driver: 12 | csiDriver: 13 | secret: 14 | create: false 15 | name: "setup-hcloud-api-token" -------------------------------------------------------------------------------- /cp-kubeconf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" == "" ]; then 4 | echo "You need to specify a host!" 
5 | exit 1 6 | fi 7 | 8 | ssh -t -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile /dev/null" root@$1 'cloud-init status --wait' 9 | scp -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile /dev/null" root@$1:/etc/rancher/k3s/k3s.yaml ./kube_config.yaml 10 | sed -i 's/127.0.0.1/'$1'/g' ./kube_config.yaml -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hcloud = { 4 | source = "hetznercloud/hcloud" 5 | version = "1.24.0" 6 | } 7 | template = { 8 | version = "~> 2.2.0" 9 | } 10 | local = { 11 | version = "~> 2.0.0" 12 | } 13 | } 14 | } 15 | 16 | provider "hcloud" { 17 | token = var.hcloud_token 18 | } 19 | 20 | #====================================================================================================== 21 | # Creating Networks 22 | 23 | resource "hcloud_ssh_key" "k3s_management_ssh_key" { 24 | name = "k3s-management-key" 25 | public_key = file("${var.hcloud_ssh_key_path}.pub") 26 | } 27 | 28 | resource "hcloud_network" "k3s_internal_network" { 29 | name = "k3s-internal" 30 | ip_range = "172.16.0.0/12" 31 | } 32 | 33 | resource "hcloud_network_subnet" "k3s_default_subnet" { 34 | network_id = hcloud_network.k3s_internal_network.id 35 | type = "cloud" 36 | network_zone = var.hcloud_network_zone 37 | ip_range = "172.16.0.0/24" 38 | } 39 | 40 | #====================================================================================================== 41 | # Creating the master node 42 | 43 | resource "hcloud_server" "k3s_management_node" { 44 | name = "${var.instance_prefix}-management-1" 45 | image = "ubuntu-20.04" 46 | server_type = var.management_instance_type 47 | location = var.hcloud_zone 48 | 49 | user_data = templatefile("${path.module}/scripts/k3s-management.sh", { 50 | secret = var.cluster_secret 51 | }) 52 | 53 | ssh_keys = [ 54 | hcloud_ssh_key.k3s_management_ssh_key.id 55 | ] 56 | } 
57 | 58 | resource "hcloud_server_network" "k3s_management_node_subnet" { 59 | server_id = hcloud_server.k3s_management_node.id 60 | subnet_id = hcloud_network_subnet.k3s_default_subnet.id 61 | } 62 | 63 | #====================================================================================================== 64 | # Creating additional management nodes 65 | 66 | resource "hcloud_server" "k3s_management_additional_nodes" { 67 | count = var.additional_management_nodes 68 | name = "${var.instance_prefix}-management-${count.index + 2}" 69 | image = "ubuntu-20.04" 70 | server_type = var.management_instance_type 71 | location = var.hcloud_zone 72 | 73 | user_data = templatefile("${path.module}/scripts/k3s-management-additional.sh", { 74 | secret = var.cluster_secret 75 | leader_ip = hcloud_server_network.k3s_management_node_subnet.ip 76 | }) 77 | 78 | ssh_keys = [ 79 | hcloud_ssh_key.k3s_management_ssh_key.id 80 | ] 81 | } 82 | 83 | resource "hcloud_server_network" "k3s_management_additional_nodes_subnets" { 84 | count = var.additional_management_nodes 85 | server_id = hcloud_server.k3s_management_additional_nodes[count.index].id 86 | subnet_id = hcloud_network_subnet.k3s_default_subnet.id 87 | } 88 | 89 | #====================================================================================================== 90 | # Creating the Worker Nodes 91 | 92 | resource "hcloud_server" "k3s_worker_nodes" { 93 | count = var.worker_nodes 94 | name = "${var.instance_prefix}-worker-${count.index + 1}" 95 | image = "ubuntu-20.04" 96 | server_type = var.worker_instance_type 97 | location = var.hcloud_zone 98 | 99 | user_data = templatefile("${path.module}/scripts/k3s-worker.sh", { 100 | secret = var.cluster_secret 101 | leader_ip = hcloud_server_network.k3s_management_node_subnet.ip 102 | }) 103 | 104 | ssh_keys = [ 105 | hcloud_ssh_key.k3s_management_ssh_key.id 106 | ] 107 | } 108 | 109 | resource "hcloud_server_network" "k3s_worker_nodes_subnets" { 110 | count = var.worker_nodes 111 | 
server_id = hcloud_server.k3s_worker_nodes[count.index].id 112 | subnet_id = hcloud_network_subnet.k3s_default_subnet.id 113 | } 114 | 115 | #====================================================================================================== 116 | # Creating the controller-values.yaml file from its template 117 | 118 | resource "local_file" "controller_values_output" { 119 | filename = "${path.module}/controller-values.yaml" 120 | content = templatefile("${path.module}/controller-values.yaml.template", { 121 | api_token = var.hcloud_token 122 | private_network_id = hcloud_network.k3s_internal_network.id 123 | }) 124 | } 125 | 126 | #====================================================================================================== 127 | # Creating the LoadBalancer and add the services 128 | 129 | # resource "hcloud_load_balancer" "k3s_management_lb" { 130 | # name = "k3s-management" 131 | # load_balancer_type = "lb11" 132 | # location = var.hcloud_zone 133 | # } 134 | 135 | # resource "hcloud_load_balancer_network" "k3s_management_lb_subnet" { 136 | # load_balancer_id = hcloud_load_balancer.k3s_management_lb.id 137 | # subnet_id = hcloud_network_subnet.k3s_default_subnet.id 138 | # } 139 | 140 | # resource "hcloud_load_balancer_target" "k3s_management_lb_target" { 141 | # type = "server" 142 | # load_balancer_id = hcloud_load_balancer.k3s_management_lb.id 143 | # server_id = hcloud_server.k3s_management_node.id 144 | # use_private_ip = true 145 | # depends_on = [hcloud_server_network.k3s_management_node_subnet] 146 | # } 147 | 148 | # resource "hcloud_load_balancer_target" "k3s_management_lb_additional_targets" { 149 | # count = var.additional_management_nodes 150 | # type = "server" 151 | # load_balancer_id = hcloud_load_balancer.k3s_management_lb.id 152 | # server_id = hcloud_server.k3s_management_additional_nodes[count.index].id 153 | # use_private_ip = true 154 | # depends_on = [hcloud_server_network.k3s_management_additional_nodes_subnets] 155 | # } 156 
| 157 | # resource "hcloud_load_balancer_service" "k3s_management_lb_k8s_service" { 158 | # load_balancer_id = hcloud_load_balancer.k3s_management_lb.id 159 | # protocol = "tcp" 160 | # listen_port = 6443 161 | # destination_port = 6443 162 | # } 163 | -------------------------------------------------------------------------------- /scripts/k3s-management-additional.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get update && apt-get upgrade -y 4 | 5 | # Installing and enabling fail2ban 6 | sudo apt-get install -y fail2ban 7 | sudo systemctl start fail2ban 8 | sudo systemctl enable fail2ban 9 | 10 | # Initializing Master 11 | curl -sfL https://get.k3s.io | K3S_TOKEN=${secret} \ 12 | sh -s - server --token=${secret} --server https://${leader_ip}:6443 --disable=traefik,local-storage,servicelb --kubelet-arg="cloud-provider=external" --disable-cloud-controller -------------------------------------------------------------------------------- /scripts/k3s-management.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get update && apt-get upgrade -y 4 | 5 | # Installing and enabling fail2ban 6 | sudo apt-get install -y fail2ban 7 | sudo systemctl start fail2ban 8 | sudo systemctl enable fail2ban 9 | 10 | # Initializing Master 11 | curl -sfL https://get.k3s.io | K3S_TOKEN=${secret} \ 12 | sh -s - server --cluster-init --token=${secret} --disable=traefik,local-storage,servicelb --kubelet-arg="cloud-provider=external" --disable-cloud-controller -------------------------------------------------------------------------------- /scripts/k3s-worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get update && apt-get upgrade -y 4 | 5 | # Installing and enabling fail2ban 6 | sudo apt-get install -y fail2ban 7 | sudo systemctl start fail2ban 8 | sudo systemctl enable fail2ban 9 
| 10 | # Initializing worker node (k3s agent) 11 | curl -sfL https://get.k3s.io | K3S_TOKEN=${secret} \ 12 | sh -s - agent --token=${secret} --server https://${leader_ip}:6443 --kubelet-arg="cloud-provider=external" -------------------------------------------------------------------------------- /terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | cluster_secret = "" 2 | hcloud_token = "" 3 | hcloud_ssh_key_path = "" -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_secret" { 2 | type = string 3 | description = "Secret that's shared between all Nodes and is used to connect to the Cluster." 4 | } 5 | 6 | variable "hcloud_token" { 7 | type = string 8 | description = "The token that is used to interact with the Hetzner Cloud API." 9 | } 10 | 11 | variable "hcloud_ssh_key_path" { 12 | type = string 13 | default = "~/.ssh/k3s_management" 14 | description = "Path to the key you want to use register on your Hetzner Cloud machines. The public key must have the same location and a .pub ending." 15 | } 16 | 17 | variable "hcloud_zone" { 18 | type = string 19 | default = "nbg1" 20 | description = "Zone you want your Cluster to get deployed in." 21 | } 22 | 23 | variable "hcloud_network_zone" { 24 | type = string 25 | default = "eu-central" 26 | description = "Network-Zone you want your Cluster to get deployed in." 27 | } 28 | 29 | variable "instance_prefix" { 30 | type = string 31 | default = "k3s" 32 | description = "The prefix that comes before the index-value to form the name of the machine." 33 | } 34 | 35 | variable "additional_management_nodes" { 36 | type = number 37 | default = 2 38 | description = "Number of additional management Nodes. 
Must always be an even number, so the total amount of management nodes is odd (1+2=3, 1+4=5, etc.)" 39 | } 40 | 41 | variable "management_instance_type" { 42 | type = string 43 | default = "cx11" 44 | description = "Hetzner instance type that is used for the machines. You can use the Hetzner Cloud CLI or browse their website to get a list of valid instance types." 45 | } 46 | 47 | variable "worker_nodes" { 48 | type = number 49 | default = 2 50 | description = "Number of worker Nodes that are used for running workloads." 51 | } 52 | 53 | variable "worker_instance_type" { 54 | type = string 55 | default = "cx21" 56 | description = "Hetzner instance type that is used for the machines. You can use the Hetzner Cloud CLI or browse their website to get a list of valid instance types." 57 | } 58 | --------------------------------------------------------------------------------