├── .circleci └── config.yml ├── .gitignore ├── LICENSE ├── README.md ├── main.tf ├── modules ├── cluster │ ├── files │ │ └── 60-floating-ip.cfg │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── version.tf ├── firewall │ ├── main.tf │ └── scripts │ │ └── ufw.sh └── kubernetes │ ├── files │ ├── 00-cgroup-systemd.conf │ ├── 10-docker-opts.conf │ ├── 20-hetzner-cloud.conf │ ├── access_tokens.yaml │ ├── ccm-networks.yaml │ ├── hcloud-csi.yaml │ ├── kube-flannel.yaml │ └── sysctl.conf │ ├── kubeadm_join.tf │ ├── main.tf │ ├── outputs.tf │ ├── scripts │ ├── control_plane.sh │ ├── install.sh │ └── worker.sh │ └── variables.tf ├── outputs.tf └── variables.tf /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | validate_terraform: 5 | docker: 6 | - image: hashicorp/terraform:1.3.2 7 | steps: 8 | - checkout 9 | - run: 10 | name: Validate Terraform Formatting 11 | command: "[ -z \"$(terraform fmt -recursive -write=false)\" ] || { terraform fmt -recursive -write=false -diff; exit 1;}" 12 | - run: 13 | name: Validate Terraform configuration files 14 | command: terraform init && terraform validate 15 | 16 | workflows: 17 | version: 2 18 | validate: 19 | jobs: 20 | - validate_terraform 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | # Local .terraform directories 3 | **/.terraform/* 4 | **/.terraform* 5 | 6 | # .tfstate files 7 | *.tfstate 8 | *.tfstate.* 9 | 10 | # Crash log files 11 | crash.log 12 | 13 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 14 | # .tfvars files are managed as part of configuration and so should be included in 15 | # version control. 
16 | # 17 | # example.tfvars 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # 28 | # !example_override.tf 29 | 30 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 31 | # example: *tfplan* 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Joost Döbken 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hetzner Cloud Kubernetes provider 🏖️ 2 | 3 | --- 4 | 5 | Unofficial Terraform module to provide Kubernetes for the Hetzner Cloud. 6 | 7 | [![JWDobken](https://circleci.com/gh/JWDobken/terraform-hcloud-kubernetes.svg?style=shield)](https://app.circleci.com/pipelines/github/JWDobken/terraform-hcloud-kubernetes?branch=main) 8 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/JWDobken/terraform-hcloud-kubernetes?label=release)](https://github.com/JWDobken/terraform-hcloud-kubernetes/releases) 9 | ![license](https://img.shields.io/github/license/JWDobken/terraform-hcloud-kubernetes.svg) 10 | 11 | Create a Kubernetes cluster on the [Hetzner cloud](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs), with the following features: 12 | 13 | - implements Hetzner's [private network](https://community.hetzner.com/tutorials/hcloud-networks-basic) for network security 14 | - configures [UFW](https://help.ubuntu.com/community/UFW) for managing complex iptables rules 15 | - deploys the [Flannel](https://github.com/coreos/flannel) CNI plugin 16 | - deploys the [Controller Manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager) with networks support, to integrate with the Hetzner Cloud API 17 | - deploys the [Container Storage Interface](https://github.com/hetznercloud/csi-driver) for dynamic provisioning of volumes 18 | 19 | # Getting Started 20 | 21 | The Hetzner Cloud provider needs to be configured with _a token generated from the dashboard_, following to the [documentation](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs). 
Provide a [Hetzner Cloud SSH key resource](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs/resources/ssh_key) to access the cluster machines: 22 | 23 | ```hcl 24 | resource "hcloud_ssh_key" "demo_cluster" { 25 | name = "demo-cluster" 26 | public_key = file("~/.ssh/hcloud.pub") 27 | } 28 | ``` 29 | 30 | Create a Kubernetes cluster: 31 | 32 | ```hcl 33 | module "hcloud_kubernetes_cluster" { 34 | source = "JWDobken/kubernetes/hcloud" 35 | cluster_name = "demo-cluster" 36 | hcloud_token = var.hcloud_token 37 | hcloud_ssh_keys = [hcloud_ssh_key.demo_cluster.id] 38 | control_plane_type = "cx11" # optional 39 | worker_type = "cx21" # optional 40 | worker_count = 3 41 | } 42 | 43 | output "kubeconfig" { 44 | value = module.hcloud_kubernetes_cluster.kubeconfig 45 | } 46 | 47 | ``` 48 | 49 | When the cluster is deployed, the `kubeconfig` to reach the cluster is available from the output. There are many ways to continue, but you can store it to file: 50 | 51 | ```cmd 52 | terraform output -raw kubeconfig > demo-cluster.conf 53 | ``` 54 | 55 | and check the access by viewing the created cluster nodes: 56 | 57 | ```cmd 58 | $ kubectl get nodes --kubeconfig=demo-cluster.conf 59 | NAME STATUS ROLES AGE VERSION 60 | control-plane-1 Ready control-plane 84s v1.25.2 61 | worker-1 Ready <none> 50s v1.25.2 62 | worker-2 Ready <none> 51s v1.25.2 63 | worker-3 Ready <none> 51s v1.25.2 64 | ``` 65 | 66 | ## Load Balancer 67 | 68 | The [Controller Manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager/blob/master/docs/load_balancers.md) deploys a load balancer for any `Service` of type `LoadBalancer`, that can be configured with `service.annotations`. 
It is also possible to create the load balancer within the network using the [Terraform provider](https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs/resources/load_balancer): 69 | 70 | ```hcl 71 | resource "hcloud_load_balancer" "load_balancer" { 72 | name = "demo-cluster-lb" 73 | load_balancer_type = "lb11" 74 | location = "nbg1" 75 | } 76 | 77 | resource "hcloud_load_balancer_network" "cluster_network" { 78 | load_balancer_id = hcloud_load_balancer.load_balancer.id 79 | network_id = module.hcloud_kubernetes_cluster.network_id 80 | } 81 | ``` 82 | 83 | ...and pass the name to the `service.annotations`. For example, deploy the ingress-controller, such as [Bitnami's Nginx Ingress Controller](https://github.com/bitnami/charts/tree/master/bitnami/nginx-ingress-controller), with the name of the load balancer as an annotation: 84 | 85 | ```cmd 86 | helm repo add bitnami https://charts.bitnami.com/bitnami 87 | helm upgrade --install nginx-ingress \ 88 | --set service.annotations."load-balancer\.hetzner\.cloud/name"="demo-cluster-lb" \ 89 | bitnami/nginx-ingress-controller 90 | ``` 91 | 92 | ## Chaining other terraform modules 93 | 94 | TLS certificate credentials form the output can be used to chain other Terraform modules, such as the [Helm provider](https://registry.terraform.io/providers/hashicorp/helm/latest/docs) or the [Kubernetes provider](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs): 95 | 96 | ```hcl 97 | provider "helm" { 98 | kubernetes { 99 | host = module.hcloud_kubernetes_cluster.endpoint 100 | 101 | cluster_ca_certificate = base64decode(module.hcloud_kubernetes_cluster.certificate_authority_data) 102 | client_certificate = base64decode(module.hcloud_kubernetes_cluster.client_certificate_data) 103 | client_key = base64decode(module.hcloud_kubernetes_cluster.client_key_data) 104 | } 105 | } 106 | 107 | provider "kubernetes" { 108 | host = module.hcloud_kubernetes_cluster.endpoint 109 | 110 | client_certificate 
= base64decode(module.hcloud_kubernetes_cluster.client_certificate_data) 111 | client_key = base64decode(module.hcloud_kubernetes_cluster.client_key_data) 112 | cluster_ca_certificate = base64decode(module.hcloud_kubernetes_cluster.certificate_authority_data) 113 | } 114 | ``` 115 | 116 | ## Considered features: 117 | 118 | - When a node is destroyed, I still need to run `kubectl drain <node>` and `kubectl delete node <node>`. Compare actual list with `kubectl get nodes --output 'jsonpath={.items[*].metadata.name}'`. 119 | - [High availability for the control-plane](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/). 120 | - Node-pool architecture, with option to label and taint. 121 | - Initialize multiple control-plane nodes. 122 | 123 | ## Acknowledgements 124 | 125 | This module came about when I was looking for an affordable Kubernetes cluster. There is an [article from Christian Beneke](https://community.hetzner.com/tutorials/install-kubernetes-cluster) and there are a couple of Terraform projects on which the current is heavily based: 126 | 127 | - Patrick Stadler's [hobby-kube provisioning](https://github.com/hobby-kube/provisioning) 128 | - Niclas Mietz's [terraform-k8s-hcloud](https://github.com/solidnerd/terraform-k8s-hcloud) 129 | 130 | Feel free to contribute or reach out to me. 
131 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | module "cluster" { 2 | source = "./modules/cluster" 3 | hcloud_token = var.hcloud_token 4 | hcloud_ssh_keys = var.hcloud_ssh_keys 5 | cluster_name = var.cluster_name 6 | location = var.location 7 | image = var.image 8 | network_zone = var.network_zone 9 | network_ip_range = var.network_ip_range 10 | subnet_ip_range = var.subnet_ip_range 11 | control_plane_type = var.control_plane_type 12 | control_plane_count = var.control_plane_count 13 | control_plane_name_format = var.control_plane_name_format 14 | worker_type = var.worker_type 15 | worker_count = var.worker_count 16 | worker_name_format = var.worker_name_format 17 | } 18 | 19 | module "firewall" { 20 | source = "./modules/firewall" 21 | connections = module.cluster.all_nodes.*.ipv4_address 22 | subnet_ip_range = var.subnet_ip_range 23 | } 24 | 25 | module "kubernetes" { 26 | source = "./modules/kubernetes" 27 | hcloud_token = var.hcloud_token 28 | network_id = module.cluster.network_id 29 | cluster_name = var.cluster_name 30 | control_plane_nodes = module.cluster.control_plane_nodes 31 | worker_nodes = module.cluster.worker_nodes 32 | private_ips = module.cluster.private_ips 33 | kubernetes_version = var.kubernetes_version 34 | } 35 | -------------------------------------------------------------------------------- /modules/cluster/files/60-floating-ip.cfg: -------------------------------------------------------------------------------- 1 | auto eth0:1 2 | iface eth0:1 inet static 3 | address ${loadbalancer_ip} 4 | netmask 32 -------------------------------------------------------------------------------- /modules/cluster/main.tf: -------------------------------------------------------------------------------- 1 | # cluster/main.tf 2 | 3 | locals { 4 | server_count = var.control_plane_count + var.worker_count 5 | servers = 
concat(hcloud_server.control_plane_node, hcloud_server.worker_node) 6 | } 7 | 8 | resource "hcloud_server" "control_plane_node" { 9 | count = var.control_plane_count 10 | name = format(var.control_plane_name_format, count.index + 1) 11 | location = var.location 12 | image = var.image 13 | server_type = var.control_plane_type 14 | ssh_keys = var.hcloud_ssh_keys 15 | 16 | labels = { 17 | control-plane = true 18 | } 19 | 20 | connection { 21 | user = "root" 22 | type = "ssh" 23 | timeout = "2m" 24 | host = self.ipv4_address 25 | } 26 | 27 | provisioner "file" { 28 | content = templatefile("${path.module}/files/60-floating-ip.cfg", { loadbalancer_ip = var.loadbalancer_ip }) 29 | destination = "/etc/network/interfaces.d/60-floating-ip.cfg" 30 | } 31 | 32 | provisioner "remote-exec" { 33 | inline = [ 34 | "while fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}/lock >/dev/null 2>&1; do sleep 1; done", 35 | "apt-get update", 36 | "apt-get install -yq ufw jq", 37 | ] 38 | } 39 | } 40 | 41 | resource "hcloud_server" "worker_node" { 42 | count = var.worker_count 43 | name = format(var.worker_name_format, count.index + 1) 44 | location = var.location 45 | image = var.image 46 | server_type = var.worker_type 47 | ssh_keys = var.hcloud_ssh_keys 48 | 49 | labels = { 50 | control-plane = false 51 | } 52 | 53 | connection { 54 | user = "root" 55 | type = "ssh" 56 | timeout = "2m" 57 | host = self.ipv4_address 58 | } 59 | 60 | provisioner "file" { 61 | content = templatefile("${path.module}/files/60-floating-ip.cfg", { loadbalancer_ip = var.loadbalancer_ip }) 62 | destination = "/etc/network/interfaces.d/60-floating-ip.cfg" 63 | } 64 | 65 | provisioner "remote-exec" { 66 | inline = [ 67 | "while fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}/lock >/dev/null 2>&1; do sleep 1; done", 68 | "apt-get update", 69 | "apt-get install -yq ufw jq" 70 | ] 71 | } 72 | } 73 | 74 | resource "hcloud_network" "kubernetes_network" { 75 | name = var.cluster_name 76 | ip_range = 
var.network_ip_range 77 | } 78 | 79 | resource "hcloud_network_subnet" "kubernetes_subnet" { 80 | network_id = hcloud_network.kubernetes_network.id 81 | type = "server" 82 | network_zone = var.network_zone 83 | ip_range = var.subnet_ip_range 84 | } 85 | 86 | resource "hcloud_server_network" "private_network" { 87 | count = local.server_count 88 | server_id = element(local.servers.*.id, count.index) 89 | subnet_id = hcloud_network_subnet.kubernetes_subnet.id 90 | } 91 | -------------------------------------------------------------------------------- /modules/cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | # cluster/outputs.tf 2 | 3 | output "private_ips" { 4 | description = "" 5 | value = hcloud_server_network.private_network.*.ip 6 | } 7 | 8 | output "private_network_interface" { 9 | value = "enp7s0" 10 | } 11 | 12 | output "all_nodes" { 13 | description = "List of all created servers." 14 | value = local.servers 15 | } 16 | 17 | output "control_plane_nodes" { 18 | description = "List of control-plane nodes." 19 | value = hcloud_server.control_plane_node 20 | } 21 | 22 | output "worker_nodes" { 23 | description = "List of worker nodes." 
24 | value = hcloud_server.worker_node 25 | } 26 | 27 | output "network_id" { 28 | value = hcloud_network.kubernetes_network.id 29 | } 30 | 31 | output "private_network" { 32 | value = hcloud_server_network.private_network 33 | } 34 | -------------------------------------------------------------------------------- /modules/cluster/provider.tf: -------------------------------------------------------------------------------- 1 | # cluster/provider 2 | 3 | provider "hcloud" { 4 | token = var.hcloud_token 5 | } 6 | -------------------------------------------------------------------------------- /modules/cluster/variables.tf: -------------------------------------------------------------------------------- 1 | # cluster/variables.tf 2 | 3 | # GENERAL 4 | variable "hcloud_token" { 5 | default = "" 6 | } 7 | 8 | variable "hcloud_ssh_keys" { 9 | type = list(any) 10 | } 11 | 12 | variable "cluster_name" { 13 | type = string 14 | } 15 | 16 | variable "location" { 17 | type = string 18 | } 19 | 20 | variable "image" { 21 | type = string 22 | } 23 | 24 | # NETWORK 25 | variable "network_zone" { 26 | type = string 27 | } 28 | 29 | variable "network_ip_range" { 30 | type = string 31 | } 32 | 33 | variable "subnet_ip_range" { 34 | type = string 35 | } 36 | 37 | # CONTROL-PLANE NODES 38 | variable "control_plane_type" { 39 | type = string 40 | } 41 | 42 | variable "control_plane_count" { 43 | type = number 44 | } 45 | 46 | variable "control_plane_name_format" { 47 | type = string 48 | } 49 | 50 | # WORKER NODES 51 | variable "worker_type" { 52 | type = string 53 | } 54 | 55 | variable "worker_count" { 56 | type = number 57 | } 58 | 59 | variable "worker_name_format" { 60 | type = string 61 | } 62 | 63 | # LOAD BALANCER 64 | variable "loadbalancer_ip" { 65 | type = string 66 | default = "159.69.0.1" 67 | } 68 | -------------------------------------------------------------------------------- /modules/cluster/version.tf: 
-------------------------------------------------------------------------------- 1 | # cluster/version.tf 2 | terraform { 3 | required_version = ">= 1.1.9" 4 | required_providers { 5 | hcloud = { 6 | source = "hetznercloud/hcloud" 7 | version = ">= 1.35.2" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /modules/firewall/main.tf: -------------------------------------------------------------------------------- 1 | # firewall/main.tf 2 | 3 | variable "connections" { 4 | type = list(any) 5 | } 6 | 7 | variable "subnet_ip_range" { 8 | type = string 9 | } 10 | 11 | resource "null_resource" "firewall" { 12 | count = length(var.connections) 13 | 14 | triggers = { 15 | template = templatefile("${path.module}/scripts/ufw.sh", { subnet_ip_range = var.subnet_ip_range }) 16 | } 17 | 18 | connection { 19 | host = element(var.connections, count.index) 20 | user = "root" 21 | agent = true 22 | } 23 | 24 | provisioner "remote-exec" { 25 | inline = [ 26 | templatefile("${path.module}/scripts/ufw.sh", { subnet_ip_range = var.subnet_ip_range }) 27 | ] 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /modules/firewall/scripts/ufw.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # Set firewall rules 5 | ufw --force reset 6 | ufw default deny incoming 7 | ufw default allow outgoing 8 | 9 | ufw allow ssh 10 | ufw allow 6443 11 | ufw allow http 12 | ufw allow https 13 | 14 | ufw allow from ${subnet_ip_range} 15 | 16 | ufw --force enable 17 | ufw status verbose 18 | # NOTE(review): 'ufw disable' below turns the firewall back OFF immediately after enabling it, leaving all the rules configured above inactive and contradicting the README's claim that UFW protects the nodes — confirm whether this is a deliberate workaround (e.g. UFW conflicting with Docker/flannel iptables rules) or a leftover debug line that should be removed 19 | ufw disable 20 | -------------------------------------------------------------------------------- /modules/kubernetes/files/00-cgroup-systemd.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | ExecStart= 3 | ExecStart=/usr/bin/dockerd -H fd:// 
--containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd -------------------------------------------------------------------------------- /modules/kubernetes/files/10-docker-opts.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | MountFlags=shared 3 | Environment="DOCKER_OPTS=--iptables=false --ip-masq=false" -------------------------------------------------------------------------------- /modules/kubernetes/files/20-hetzner-cloud.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_EXTRA_ARGS=--cloud-provider=external" -------------------------------------------------------------------------------- /modules/kubernetes/files/access_tokens.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: hcloud 5 | namespace: kube-system 6 | stringData: 7 | token: "${hcloud_token}" 8 | network: "${network_id}" 9 | --- 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: hcloud-csi 14 | namespace: kube-system 15 | stringData: 16 | token: "${hcloud_token}" -------------------------------------------------------------------------------- /modules/kubernetes/files/ccm-networks.yaml: -------------------------------------------------------------------------------- 1 | # from: https://raw.githubusercontent.com/hetznercloud/hcloud-cloud-controller-manager/master/deploy/ccm-networks.yaml 2 | # NOTE: this release was tested against kubernetes v1.18.x 3 | --- 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: cloud-controller-manager 8 | namespace: kube-system 9 | --- 10 | kind: ClusterRoleBinding 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | metadata: 13 | name: system:cloud-controller-manager 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: ClusterRole 17 | name: cluster-admin 18 | subjects: 19 | 
- kind: ServiceAccount 20 | name: cloud-controller-manager 21 | namespace: kube-system 22 | --- 23 | apiVersion: apps/v1 24 | kind: Deployment 25 | metadata: 26 | name: hcloud-cloud-controller-manager 27 | namespace: kube-system 28 | spec: 29 | replicas: 1 30 | revisionHistoryLimit: 2 31 | selector: 32 | matchLabels: 33 | app: hcloud-cloud-controller-manager 34 | template: 35 | metadata: 36 | labels: 37 | app: hcloud-cloud-controller-manager 38 | spec: 39 | serviceAccountName: cloud-controller-manager 40 | dnsPolicy: Default 41 | tolerations: 42 | # this taint is set by all kubelets running `--cloud-provider=external` 43 | # so we should tolerate it to schedule the cloud controller manager 44 | - key: "node.cloudprovider.kubernetes.io/uninitialized" 45 | value: "true" 46 | effect: "NoSchedule" 47 | - key: "CriticalAddonsOnly" 48 | operator: "Exists" 49 | # cloud controller manages should be able to run on masters 50 | - key: "node-role.kubernetes.io/master" 51 | effect: NoSchedule 52 | operator: Exists 53 | - key: "node-role.kubernetes.io/control-plane" 54 | effect: NoSchedule 55 | operator: Exists 56 | - key: "node.kubernetes.io/not-ready" 57 | effect: "NoSchedule" 58 | hostNetwork: true 59 | containers: 60 | - image: hetznercloud/hcloud-cloud-controller-manager:v1.9.1 61 | name: hcloud-cloud-controller-manager 62 | command: 63 | - "/bin/hcloud-cloud-controller-manager" 64 | - "--cloud-provider=hcloud" 65 | - "--leader-elect=false" 66 | - "--allow-untagged-cloud" 67 | - "--allocate-node-cidrs=true" 68 | - "--cluster-cidr=10.244.0.0/16" 69 | resources: 70 | requests: 71 | cpu: 100m 72 | memory: 50Mi 73 | env: 74 | - name: NODE_NAME 75 | valueFrom: 76 | fieldRef: 77 | fieldPath: spec.nodeName 78 | - name: HCLOUD_TOKEN 79 | valueFrom: 80 | secretKeyRef: 81 | name: hcloud 82 | key: token 83 | - name: HCLOUD_NETWORK 84 | valueFrom: 85 | secretKeyRef: 86 | name: hcloud 87 | key: network 88 | priorityClassName: system-cluster-critical 89 | 
-------------------------------------------------------------------------------- /modules/kubernetes/files/hcloud-csi.yaml: -------------------------------------------------------------------------------- 1 | # from: https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml 2 | --- 3 | allowVolumeExpansion: true 4 | apiVersion: storage.k8s.io/v1 5 | kind: StorageClass 6 | metadata: 7 | annotations: 8 | storageclass.kubernetes.io/is-default-class: "true" 9 | name: hcloud-volumes 10 | namespace: kube-system 11 | provisioner: csi.hetzner.cloud 12 | volumeBindingMode: WaitForFirstConsumer 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: hcloud-csi-controller 18 | namespace: kube-system 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRole 22 | metadata: 23 | name: hcloud-csi-controller 24 | rules: 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - persistentvolumes 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - update 34 | - patch 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - nodes 39 | verbs: 40 | - get 41 | - list 42 | - watch 43 | - apiGroups: 44 | - csi.storage.k8s.io 45 | resources: 46 | - csinodeinfos 47 | verbs: 48 | - get 49 | - list 50 | - watch 51 | - apiGroups: 52 | - storage.k8s.io 53 | resources: 54 | - csinodes 55 | verbs: 56 | - get 57 | - list 58 | - watch 59 | - apiGroups: 60 | - storage.k8s.io 61 | resources: 62 | - volumeattachments 63 | verbs: 64 | - get 65 | - list 66 | - watch 67 | - update 68 | - patch 69 | - apiGroups: 70 | - storage.k8s.io 71 | resources: 72 | - volumeattachments/status 73 | verbs: 74 | - patch 75 | - apiGroups: 76 | - "" 77 | resources: 78 | - secrets 79 | verbs: 80 | - get 81 | - list 82 | - apiGroups: 83 | - "" 84 | resources: 85 | - persistentvolumes 86 | verbs: 87 | - get 88 | - list 89 | - watch 90 | - create 91 | - delete 92 | - patch 93 | - apiGroups: 94 | - "" 95 | resources: 96 | - persistentvolumeclaims 97 | - 
persistentvolumeclaims/status 98 | verbs: 99 | - get 100 | - list 101 | - watch 102 | - update 103 | - patch 104 | - apiGroups: 105 | - storage.k8s.io 106 | resources: 107 | - storageclasses 108 | verbs: 109 | - get 110 | - list 111 | - watch 112 | - apiGroups: 113 | - "" 114 | resources: 115 | - events 116 | verbs: 117 | - list 118 | - watch 119 | - create 120 | - update 121 | - patch 122 | - apiGroups: 123 | - snapshot.storage.k8s.io 124 | resources: 125 | - volumesnapshots 126 | verbs: 127 | - get 128 | - list 129 | - apiGroups: 130 | - snapshot.storage.k8s.io 131 | resources: 132 | - volumesnapshotcontents 133 | verbs: 134 | - get 135 | - list 136 | - apiGroups: 137 | - "" 138 | resources: 139 | - pods 140 | verbs: 141 | - get 142 | - list 143 | - watch 144 | - apiGroups: 145 | - "" 146 | resources: 147 | - events 148 | verbs: 149 | - get 150 | - list 151 | - watch 152 | - create 153 | - update 154 | - patch 155 | --- 156 | apiVersion: rbac.authorization.k8s.io/v1 157 | kind: ClusterRoleBinding 158 | metadata: 159 | name: hcloud-csi-controller 160 | roleRef: 161 | apiGroup: rbac.authorization.k8s.io 162 | kind: ClusterRole 163 | name: hcloud-csi-controller 164 | subjects: 165 | - kind: ServiceAccount 166 | name: hcloud-csi-controller 167 | namespace: kube-system 168 | --- 169 | apiVersion: v1 170 | kind: Service 171 | metadata: 172 | labels: 173 | app: hcloud-csi 174 | name: hcloud-csi-controller-metrics 175 | namespace: kube-system 176 | spec: 177 | ports: 178 | - name: metrics 179 | port: 9189 180 | targetPort: metrics 181 | selector: 182 | app: hcloud-csi-controller 183 | --- 184 | apiVersion: v1 185 | kind: Service 186 | metadata: 187 | labels: 188 | app: hcloud-csi 189 | name: hcloud-csi-node-metrics 190 | namespace: kube-system 191 | spec: 192 | ports: 193 | - name: metrics 194 | port: 9189 195 | targetPort: metrics 196 | selector: 197 | app: hcloud-csi 198 | --- 199 | apiVersion: apps/v1 200 | kind: Deployment 201 | metadata: 202 | name: 
hcloud-csi-controller 203 | namespace: kube-system 204 | spec: 205 | replicas: 1 206 | selector: 207 | matchLabels: 208 | app: hcloud-csi-controller 209 | template: 210 | metadata: 211 | labels: 212 | app: hcloud-csi-controller 213 | spec: 214 | containers: 215 | - image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 216 | name: csi-attacher 217 | volumeMounts: 218 | - mountPath: /run/csi 219 | name: socket-dir 220 | - image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0 221 | name: csi-resizer 222 | volumeMounts: 223 | - mountPath: /run/csi 224 | name: socket-dir 225 | - args: 226 | - --feature-gates=Topology=true 227 | - --default-fstype=ext4 228 | image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2 229 | name: csi-provisioner 230 | volumeMounts: 231 | - mountPath: /run/csi 232 | name: socket-dir 233 | - command: 234 | - /bin/hcloud-csi-driver-controller 235 | env: 236 | - name: CSI_ENDPOINT 237 | value: unix:///run/csi/socket 238 | - name: METRICS_ENDPOINT 239 | value: 0.0.0.0:9189 240 | - name: ENABLE_METRICS 241 | value: "true" 242 | - name: KUBE_NODE_NAME 243 | valueFrom: 244 | fieldRef: 245 | apiVersion: v1 246 | fieldPath: spec.nodeName 247 | - name: HCLOUD_TOKEN 248 | valueFrom: 249 | secretKeyRef: 250 | key: token 251 | name: hcloud 252 | image: hetznercloud/hcloud-csi-driver:latest 253 | imagePullPolicy: Always 254 | livenessProbe: 255 | failureThreshold: 5 256 | httpGet: 257 | path: /healthz 258 | port: healthz 259 | initialDelaySeconds: 10 260 | periodSeconds: 2 261 | timeoutSeconds: 3 262 | name: hcloud-csi-driver 263 | ports: 264 | - containerPort: 9189 265 | name: metrics 266 | - containerPort: 9808 267 | name: healthz 268 | protocol: TCP 269 | volumeMounts: 270 | - mountPath: /run/csi 271 | name: socket-dir 272 | - image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0 273 | imagePullPolicy: Always 274 | name: liveness-probe 275 | volumeMounts: 276 | - mountPath: /run/csi 277 | name: socket-dir 278 | serviceAccountName: hcloud-csi-controller 279 | volumes: 
280 | - emptyDir: {} 281 | name: socket-dir 282 | --- 283 | apiVersion: apps/v1 284 | kind: DaemonSet 285 | metadata: 286 | labels: 287 | app: hcloud-csi 288 | name: hcloud-csi-node 289 | namespace: kube-system 290 | spec: 291 | selector: 292 | matchLabels: 293 | app: hcloud-csi 294 | template: 295 | metadata: 296 | labels: 297 | app: hcloud-csi 298 | spec: 299 | affinity: 300 | nodeAffinity: 301 | requiredDuringSchedulingIgnoredDuringExecution: 302 | nodeSelectorTerms: 303 | - matchExpressions: 304 | - key: instance.hetzner.cloud/is-root-server 305 | operator: NotIn 306 | values: 307 | - "true" 308 | containers: 309 | - args: 310 | - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket 311 | image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 312 | name: csi-node-driver-registrar 313 | volumeMounts: 314 | - mountPath: /run/csi 315 | name: plugin-dir 316 | - mountPath: /registration 317 | name: registration-dir 318 | - command: 319 | - /bin/hcloud-csi-driver-node 320 | env: 321 | - name: CSI_ENDPOINT 322 | value: unix:///run/csi/socket 323 | - name: METRICS_ENDPOINT 324 | value: 0.0.0.0:9189 325 | - name: ENABLE_METRICS 326 | value: "true" 327 | image: hetznercloud/hcloud-csi-driver:latest 328 | imagePullPolicy: Always 329 | livenessProbe: 330 | failureThreshold: 5 331 | httpGet: 332 | path: /healthz 333 | port: healthz 334 | initialDelaySeconds: 10 335 | periodSeconds: 2 336 | timeoutSeconds: 3 337 | name: hcloud-csi-driver 338 | ports: 339 | - containerPort: 9189 340 | name: metrics 341 | - containerPort: 9808 342 | name: healthz 343 | protocol: TCP 344 | securityContext: 345 | privileged: true 346 | volumeMounts: 347 | - mountPath: /var/lib/kubelet 348 | mountPropagation: Bidirectional 349 | name: kubelet-dir 350 | - mountPath: /run/csi 351 | name: plugin-dir 352 | - mountPath: /dev 353 | name: device-dir 354 | - image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0 355 | imagePullPolicy: Always 356 | name: liveness-probe 357 | 
volumeMounts: 358 | - mountPath: /run/csi 359 | name: plugin-dir 360 | tolerations: 361 | - effect: NoExecute 362 | operator: Exists 363 | - effect: NoSchedule 364 | operator: Exists 365 | - key: CriticalAddonsOnly 366 | operator: Exists 367 | volumes: 368 | - hostPath: 369 | path: /var/lib/kubelet 370 | type: Directory 371 | name: kubelet-dir 372 | - hostPath: 373 | path: /var/lib/kubelet/plugins/csi.hetzner.cloud/ 374 | type: DirectoryOrCreate 375 | name: plugin-dir 376 | - hostPath: 377 | path: /var/lib/kubelet/plugins_registry/ 378 | type: Directory 379 | name: registration-dir 380 | - hostPath: 381 | path: /dev 382 | type: Directory 383 | name: device-dir 384 | --- 385 | apiVersion: storage.k8s.io/v1 386 | kind: CSIDriver 387 | metadata: 388 | name: csi.hetzner.cloud 389 | spec: 390 | attachRequired: true 391 | podInfoOnMount: true 392 | volumeLifecycleModes: 393 | - Persistent 394 | fsGroupPolicy: File 395 | -------------------------------------------------------------------------------- /modules/kubernetes/files/kube-flannel.yaml: -------------------------------------------------------------------------------- 1 | # from https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 2 | --- 3 | kind: Namespace 4 | apiVersion: v1 5 | metadata: 6 | name: kube-flannel 7 | labels: 8 | pod-security.kubernetes.io/enforce: privileged 9 | --- 10 | kind: ClusterRole 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | metadata: 13 | name: flannel 14 | rules: 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - pods 19 | verbs: 20 | - get 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - nodes 25 | verbs: 26 | - list 27 | - watch 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - nodes/status 32 | verbs: 33 | - patch 34 | --- 35 | kind: ClusterRoleBinding 36 | apiVersion: rbac.authorization.k8s.io/v1 37 | metadata: 38 | name: flannel 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: flannel 43 | subjects: 44 
| - kind: ServiceAccount 45 | name: flannel 46 | namespace: kube-flannel 47 | --- 48 | apiVersion: v1 49 | kind: ServiceAccount 50 | metadata: 51 | name: flannel 52 | namespace: kube-flannel 53 | --- 54 | kind: ConfigMap 55 | apiVersion: v1 56 | metadata: 57 | name: kube-flannel-cfg 58 | namespace: kube-flannel 59 | labels: 60 | tier: node 61 | app: flannel 62 | data: 63 | cni-conf.json: | 64 | { 65 | "name": "cbr0", 66 | "cniVersion": "0.3.1", 67 | "plugins": [ 68 | { 69 | "type": "flannel", 70 | "delegate": { 71 | "hairpinMode": true, 72 | "isDefaultGateway": true 73 | } 74 | }, 75 | { 76 | "type": "portmap", 77 | "capabilities": { 78 | "portMappings": true 79 | } 80 | } 81 | ] 82 | } 83 | net-conf.json: | 84 | { 85 | "Network": "10.244.0.0/16", 86 | "Backend": { 87 | "Type": "vxlan" 88 | } 89 | } 90 | --- 91 | apiVersion: apps/v1 92 | kind: DaemonSet 93 | metadata: 94 | name: kube-flannel-ds 95 | namespace: kube-flannel 96 | labels: 97 | tier: node 98 | app: flannel 99 | spec: 100 | selector: 101 | matchLabels: 102 | app: flannel 103 | template: 104 | metadata: 105 | labels: 106 | tier: node 107 | app: flannel 108 | spec: 109 | affinity: 110 | nodeAffinity: 111 | requiredDuringSchedulingIgnoredDuringExecution: 112 | nodeSelectorTerms: 113 | - matchExpressions: 114 | - key: kubernetes.io/os 115 | operator: In 116 | values: 117 | - linux 118 | hostNetwork: true 119 | priorityClassName: system-node-critical 120 | tolerations: 121 | - operator: Exists 122 | effect: NoSchedule 123 | serviceAccountName: flannel 124 | initContainers: 125 | - name: install-cni-plugin 126 | #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply) 127 | image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 128 | command: 129 | - cp 130 | args: 131 | - -f 132 | - /flannel 133 | - /opt/cni/bin/flannel 134 | volumeMounts: 135 | - name: cni-plugin 136 | mountPath: /opt/cni/bin 137 | - name: install-cni 138 | #image: 
flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply) 139 | image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2 140 | command: 141 | - cp 142 | args: 143 | - -f 144 | - /etc/kube-flannel/cni-conf.json 145 | - /etc/cni/net.d/10-flannel.conflist 146 | volumeMounts: 147 | - name: cni 148 | mountPath: /etc/cni/net.d 149 | - name: flannel-cfg 150 | mountPath: /etc/kube-flannel/ 151 | containers: 152 | - name: kube-flannel 153 | #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply) 154 | image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2 155 | command: 156 | - /opt/bin/flanneld 157 | args: 158 | - --ip-masq 159 | - --kube-subnet-mgr 160 | resources: 161 | requests: 162 | cpu: "100m" 163 | memory: "50Mi" 164 | limits: 165 | cpu: "100m" 166 | memory: "50Mi" 167 | securityContext: 168 | privileged: false 169 | capabilities: 170 | add: ["NET_ADMIN", "NET_RAW"] 171 | env: 172 | - name: POD_NAME 173 | valueFrom: 174 | fieldRef: 175 | fieldPath: metadata.name 176 | - name: POD_NAMESPACE 177 | valueFrom: 178 | fieldRef: 179 | fieldPath: metadata.namespace 180 | - name: EVENT_QUEUE_DEPTH 181 | value: "5000" 182 | volumeMounts: 183 | - name: run 184 | mountPath: /run/flannel 185 | - name: flannel-cfg 186 | mountPath: /etc/kube-flannel/ 187 | - name: xtables-lock 188 | mountPath: /run/xtables.lock 189 | volumes: 190 | - name: run 191 | hostPath: 192 | path: /run/flannel 193 | - name: cni-plugin 194 | hostPath: 195 | path: /opt/cni/bin 196 | - name: cni 197 | hostPath: 198 | path: /etc/cni/net.d 199 | - name: flannel-cfg 200 | configMap: 201 | name: kube-flannel-cfg 202 | - name: xtables-lock 203 | hostPath: 204 | path: /run/xtables.lock 205 | type: FileOrCreate 206 | -------------------------------------------------------------------------------- /modules/kubernetes/files/sysctl.conf: -------------------------------------------------------------------------------- 1 | # These settings will 
allow forwarding of IPv4 and IPv6 packages between multiple network interfaces. 2 | # This is required because each container has its own virtual network interface. 3 | 4 | # Allow IP forwarding for kubernetes 5 | net.bridge.bridge-nf-call-iptables = 1 6 | net.bridge.bridge-nf-call-ip6tables = 1 7 | net.ipv4.ip_forward = 1 8 | net.ipv6.conf.all.forwarding = 1 -------------------------------------------------------------------------------- /modules/kubernetes/kubeadm_join.tf: -------------------------------------------------------------------------------- 1 | # kubernetes/kubeadm_join.tf 2 | 3 | resource "null_resource" "kubeadm_join" { 4 | count = length(var.worker_nodes) 5 | depends_on = [null_resource.install] 6 | 7 | connection { 8 | host = element(var.worker_nodes.*.ipv4_address, count.index) 9 | user = "root" 10 | agent = true 11 | } 12 | 13 | provisioner "local-exec" { 14 | command = < /tmp/kubeadm_token' 17 | EOT 18 | } 19 | 20 | provisioner "local-exec" { 21 | command = < /etc/apt/sources.list.d/docker-and-kubernetes.list 16 | deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable 17 | deb http://packages.cloud.google.com/apt/ kubernetes-xenial main 18 | EOF 19 | 20 | apt-get -qq update 21 | apt-get -qq install -y docker-ce 22 | apt-get -qq install -y kubelet=${kubernetes_version}-* kubeadm=${kubernetes_version}-* kubectl=${kubernetes_version}-* 23 | sysctl -p 24 | -------------------------------------------------------------------------------- /modules/kubernetes/scripts/worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | [ -e /tmp/access_tokens.conf ] && rm /tmp/access_tokens.conf 5 | 6 | until $(nc -z ${control_plane_private_ip} 6443); do 7 | echo "Waiting for API server to respond" 8 | sleep 5 9 | done 10 | 11 | token=$(cat /tmp/kubeadm_token) 12 | 13 | # join the worker nodes 14 | [ -f /etc/containerd/config.toml ] && rm 
/etc/containerd/config.toml 15 | systemctl restart containerd 16 | kubeadm join --token=$${token} ${control_plane_private_ip}:6443 \ 17 | --discovery-token-unsafe-skip-ca-verification \ 18 | --ignore-preflight-errors=Swap 19 | -------------------------------------------------------------------------------- /modules/kubernetes/variables.tf: -------------------------------------------------------------------------------- 1 | # kubernetes/variables.tf 2 | 3 | # GENERAL 4 | variable "hcloud_token" { 5 | default = "" 6 | } 7 | 8 | # NETWORK 9 | variable "network_id" { 10 | type = string 11 | } 12 | 13 | variable "private_ips" { 14 | type = list(any) 15 | } 16 | 17 | # CONTROL-PLANE NODES 18 | variable "control_plane_nodes" { 19 | type = list(any) 20 | } 21 | 22 | # WORKER NODES 23 | variable "worker_nodes" { 24 | type = list(any) 25 | } 26 | 27 | # KUBERNETES 28 | variable "kubernetes_version" { 29 | type = string 30 | } 31 | 32 | variable "cluster_name" { 33 | type = string 34 | } 35 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | # get cluster module output 2 | output "network_id" { 3 | value = module.cluster.network_id 4 | description = "Unique ID of the network." 5 | } 6 | 7 | output "private_ips" { 8 | value = module.cluster.private_ips 9 | description = "The IPv4 addresses within the private network." 10 | } 11 | 12 | output "control_plane_nodes" { 13 | value = module.cluster.control_plane_nodes 14 | description = "The control-plane node objects." 15 | } 16 | 17 | output "control_plane_nodes_ips" { 18 | value = module.cluster.control_plane_nodes.*.ipv4_address 19 | description = "The IPv4 addresses within the control-plane network." 20 | } 21 | 22 | output "control_plane_nodes_ids" { 23 | value = module.cluster.control_plane_nodes.*.id 24 | description = "The ids of the control-plane nodes." 
25 | } 26 | 27 | output "worker_nodes" { 28 | value = module.cluster.worker_nodes 29 | description = "The worker node objects." 30 | } 31 | 32 | output "worker_nodes_ips" { 33 | value = module.cluster.worker_nodes.*.ipv4_address 34 | description = "The IPv4 addresses within the worker network." 35 | } 36 | 37 | output "worker_nodes_ids" { 38 | value = module.cluster.worker_nodes.*.id 39 | description = "The ids of the worker nodes." 40 | } 41 | 42 | output "kubeconfig" { 43 | value = module.kubernetes.kubeconfig 44 | description = "Kubectl config file contents for the cluster." 45 | } 46 | 47 | output "endpoint" { 48 | value = module.kubernetes.endpoint 49 | description = "The endpoint for the Kubernetes API." 50 | } 51 | 52 | output "certificate_authority_data" { 53 | value = module.kubernetes.certificate_authority_data 54 | description = "Nested attribute containing certificate-authority-data for the cluster. This is the base64 encoded certificate data required to communicate with the cluster." 55 | } 56 | 57 | output "client_certificate_data" { 58 | value = module.kubernetes.client_certificate_data 59 | description = "Client certificate to communicate with the API." 60 | } 61 | 62 | output "client_key_data" { 63 | value = module.kubernetes.client_key_data 64 | description = "Client key to communicate with the API." 65 | } 66 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # GENERAL 2 | variable "cluster_name" { 3 | description = "(Required) - The name of the cluster." 4 | type = string 5 | } 6 | 7 | variable "hcloud_token" { 8 | description = "(Required) - The Hetzner Cloud API Token, can also be specified with the HCLOUD_TOKEN environment variable." 9 | type = string 10 | } 11 | 12 | variable "hcloud_ssh_keys" { 13 | description = "(Required) - SSH key IDs or names which should be injected into the server at creation time." 
14 | type = list(any) 15 | } 16 | 17 | variable "location" { 18 | description = "(Optional) - Location, e.g. 'nbg1' (Neurenberg)." 19 | type = string 20 | default = "nbg1" 21 | } 22 | 23 | # NETWORK 24 | variable "network_zone" { 25 | description = "(Optional) - Name of network zone, e.g. 'eu-central'." 26 | type = string 27 | default = "eu-central" 28 | } 29 | 30 | variable "network_ip_range" { 31 | description = "(Optional) - IP Range of the whole Network which must span all included subnets and route destinations. Must be one of the private ipv4 ranges of RFC1918." 32 | type = string 33 | default = "10.98.0.0/16" 34 | } 35 | 36 | variable "subnet_ip_range" { 37 | description = "(Optional) - Range to allocate IPs from. Must be a subnet of the ip_range of the Network and must not overlap with any other subnets or with any destinations in routes." 38 | type = string 39 | default = "10.98.0.0/16" 40 | } 41 | 42 | # CONTROL-PLANE NODES 43 | variable "control_plane_type" { 44 | description = "(Optional) - For more types have a look at https://www.hetzner.de/cloud" 45 | type = string 46 | default = "cx11" 47 | } 48 | 49 | variable "control_plane_count" { 50 | description = "(Optional) - Number of control-plane nodes." 51 | type = number 52 | default = 1 53 | } 54 | 55 | variable "image" { 56 | description = "(Optional) - Predefined Image that will be used to spin up the machines." 57 | type = string 58 | default = "ubuntu-20.04" 59 | } 60 | 61 | variable "control_plane_name_format" { 62 | description = "(Optional) - Format for the control-plane node names, defaults to 'control-plane-0'." 63 | type = string 64 | default = "control-plane-%d" 65 | } 66 | 67 | # WORKER NODES 68 | variable "worker_type" { 69 | description = "(Optional) - For more types have a look at https://www.hetzner.de/cloud" 70 | type = string 71 | default = "cx21" 72 | } 73 | 74 | variable "worker_count" { 75 | description = "(Required) - Number of worker nodes." 
76 | type = number 77 | } 78 | 79 | variable "worker_name_format" { 80 | description = "(Optional) - Format for the worker node names, defaults to 'worker-0'." 81 | type = string 82 | default = "worker-%d" 83 | } 84 | 85 | # KUBERNETES 86 | variable "kubernetes_version" { 87 | description = "(Optional) - Kubernetes version installed, e.g. '1.25.2'." 88 | type = string 89 | default = "1.25.2" 90 | } 91 | --------------------------------------------------------------------------------