├── test
│   ├── variables.tf
│   ├── main.tf
│   └── outputs.tf
├── scripts
│   ├── kubeconfig.sh
│   └── provision.sh
├── .gitignore
├── outputs.tf
├── CHANGELOG.md
├── variables.tf
├── README.md
└── main.tf

/test/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_vultr_api_key" {
2 |   type = string
3 |   sensitive = true
4 | }
5 |
--------------------------------------------------------------------------------
/scripts/kubeconfig.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 |
4 | KUBECONFIG=$(k0sctl kubeconfig | base64)
5 |
6 | jq -n --arg kubeconfig "$KUBECONFIG" '{"kubeconfig":$kubeconfig}'
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | *.log
4 | *.env
5 | tmp/
6 | admin*.conf
7 | k0sctl.yaml
8 | terraform.tfstate*
9 | .terraform
10 | .terraform.tfstate.lock.info
11 | .terraform.lock.hcl
12 |
--------------------------------------------------------------------------------
/test/main.tf:
--------------------------------------------------------------------------------
1 | module "k0s" {
2 |   source = "../"
3 |   controller_count = 3
4 |   provisioner_public_key = chomp(file("~/.ssh/id_rsa.pub"))
5 |   cluster_vultr_api_key = var.cluster_vultr_api_key
6 |   control_plane_firewall_rules = [
7 |     {
8 |       port = 6443
9 |       ip_type = "v4"
10 |       source = "0.0.0.0/0"
11 |     }
12 |   ]
13 | }
14 |
--------------------------------------------------------------------------------
/test/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_id" {
2 |   value = module.k0s.cluster_id
3 | }
4 |
5 | output "cluster_network_id" {
6 |   value = module.k0s.cluster_network_id
7 | }
8 |
9 | output "control_plane_lb_id" {
10 |   value = module.k0s.control_plane_lb_id
11 | }
12 |
13 | output "control_plane_address" {
14 |   value = module.k0s.control_plane_address
15 | }
16 |
17 | output "cluster_firewall_group_id" {
18 |   value = module.k0s.cluster_firewall_group_id
19 | }
20 |
21 | output "controller_ips" {
22 |   value = module.k0s.controller_ips
23 | }
24 |
25 | output "controller_ids" {
26 |   value = module.k0s.controller_ids
27 | }
28 |
29 | output "worker_ips" {
30 |   value = module.k0s.worker_ips
31 | }
32 |
33 | output "worker_ids" {
34 |   value = module.k0s.worker_ids
35 | }
36 |
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_id" {
2 |   value = random_id.cluster.hex
3 | }
4 |
5 | output "cluster_network_id" {
6 |   value = vultr_private_network.cluster.id
7 | }
8 |
9 | output "control_plane_lb_id" {
10 |   value = vultr_load_balancer.control_plane_ha.id
11 | }
12 |
13 | output "control_plane_address" {
14 |   value = vultr_load_balancer.control_plane_ha.ipv4
15 | }
16 |
17 | output "cluster_firewall_group_id" {
18 |   value = vultr_firewall_group.cluster.id
19 | }
20 |
21 | output "controller_ips" {
22 |   value = vultr_instance.control_plane.*.main_ip
23 | }
24 |
25 | output "controller_ids" {
26 |   value = vultr_instance.control_plane.*.id
27 | }
28 |
29 | output "worker_ips" {
30 |   value = vultr_instance.worker.*.main_ip
31 | }
32 |
33 | output "worker_ids" {
34 |   value = vultr_instance.worker.*.id
35 | }
36 |
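The two outputs files above expose the same identifiers and addresses, once from the wrapping test configuration and once from the module root. As a quick illustration (not part of the repository; only standard Terraform CLI commands, with output names as declared above), they can be read back from state after an apply:

```sh
# Read a single output by name (as declared in outputs.tf / test/outputs.tf)
terraform output control_plane_address

# Machine-readable form, e.g. to feed the controller IPs to another tool
terraform output -json controller_ips
```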
--------------------------------------------------------------------------------
/scripts/provision.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euxo pipefail
3 |
4 | safe_apt(){
5 |   while fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}/lock >/dev/null 2>&1 ; do
6 |     echo "Waiting for apt lock..."
7 |     sleep 1
8 |   done
9 |   apt "$@"
10 | }
11 |
12 | safe_apt -y update
13 | safe_apt -y install jq
14 |
15 | PUBLIC_MAC=$(curl --silent 169.254.169.254/v1.json | jq -r '.interfaces[0].mac')
16 | PUBLIC_NIC=$(ip -j link | jq --arg PUBLIC_MAC $PUBLIC_MAC '.[] | select(.address==$PUBLIC_MAC)' | jq -r .ifname)
17 | INTERNAL_MAC=$(curl --silent 169.254.169.254/v1.json | jq -r '.interfaces[1].mac')
18 | INTERNAL_NIC=$(ip -j link | jq --arg INTERNAL_MAC $INTERNAL_MAC '.[] | select(.address==$INTERNAL_MAC)' | jq -r .ifname)
19 | INTERNAL_IP=$1
20 | CONTROL_PLANE_PORTS=(6443 2379 2380 10250 10251 10252 8132 8133 9443)
21 |
22 | if [ $(echo $HOSTNAME | grep controller) ]; then
23 |   NODE_ROLE=controller
24 | elif [ $(echo $HOSTNAME | grep worker) ]; then
25 |   NODE_ROLE=worker
26 | fi
27 |
28 | case $NODE_ROLE in
29 |   controller)
30 |     for port in "${CONTROL_PLANE_PORTS[@]}"; do
31 |       ufw allow $port
32 |       ufw allow in on $INTERNAL_NIC
33 |     done
34 |     ;;
35 |   worker)
36 |     ufw allow 10250
37 |     ufw allow 179
38 |     ufw allow 9443
39 |     ufw allow 4789/udp
40 |     ufw allow 8132:8133/tcp
41 |     ufw allow 30000:32767/tcp
42 |     ufw allow in on $INTERNAL_NIC
43 |     ;;
44 | esac
45 |
46 | ufw reload
47 |
48 | cat <<-EOF > /etc/systemd/network/public.network
49 | [Match]
50 | MACAddress=$PUBLIC_MAC
51 |
52 | [Network]
53 | DHCP=yes
54 | EOF
55 |
56 | cat <<-EOF > /etc/systemd/network/private.network
57 | [Match]
58 | MACAddress=$INTERNAL_MAC
59 |
60 | [Network]
61 | Address=$INTERNAL_IP
62 | EOF
63 |
64 | echo "# For k0s" >> /etc/hosts
65 | echo "$INTERNAL_IP $(hostname)" >> /etc/hosts
66 |
67 | systemctl enable systemd-networkd systemd-resolved
68 | systemctl restart systemd-networkd systemd-resolved
69 | systemctl disable networking
70 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 | ## [v2.0.0](https://github.com/vultr/terraform-vultr-condor/releases/tag/v2.0.0) (2021-09-03)
3 | ### Breaking Changes
4 | * Condor is now based on Mirantis K0s rather than Kubeadm; as such, v2 is completely incompatible with previous releases.
5 | ### Features
6 | * HA Control Plane
7 | * Isolated control plane (Control Plane nodes are not part of the cluster)
8 | * Control Plane and Worker Node firewalls.
9 | * Declarative cluster and component (Vultr CCM, CSI, Calico, etc.) upgrades
10 | * K0s manifest deployer support
11 |
12 | ## [v1.3.0](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.3.0) (2021-08-25)
13 | ### Breaking Changes
14 | * Remove `calico_wireguard` variable - nodes were not properly configured, will need to revisit
15 | * Add `calico_mode` variable - previously defaulted to `vxlan`, is now configurable but defaults to `bird`
16 | ### Changes
17 | * Bump K0s release from `v1.21.1+k0s.0` to `v1.21.3+k0s.0`
18 | * Bump kube-system component versions
19 |
20 | ## [v1.2.3](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.2.3) (2021-07-14)
21 | ### Fixes
22 | * Handle dynamic NIC names.
23 |
24 | ## [v1.2.2](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.2.2) (2021-06-21)
25 | ### Fixes
26 | * Fix firewall configuration after Vultr image changes.
27 |
28 | ## [v1.2.1](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.2.1) (2021-06-19)
29 | ### Changes
30 | * Template Vultr CSI
31 | * Add Vultr CSI image/version vars
32 | * Add kubeconfig filename tf workspace suffix
33 |
34 | ## [v1.2.0](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.2.0) (2021-06-12)
35 | ### Features
36 | * Support K0s Helm deployments.
37 | ### Changes
38 | * Convert module internal K0sctl configuration to HCL from YAML.
39 | ### Fixes
40 | * Change Controller/Worker network interfaces from ens3/ens7 to enp1s0/enp6s0 due to Vultr image changes.
41 |
42 | ## [v1.1.0](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.1.0) (2021-06-06)
43 | ### Features
44 | * Write Kubeconfig locally option.
45 | * Control Plane VLB Firewall.
46 | ### Changes
47 | * Add variable descriptions.
48 | * Lock up cluster firewall, SSH only by default.
49 | * Docs updates.
50 | ### Fixes
51 | * README markdown.
52 |
53 | ## [v1.0.1](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.0.1) (2021-06-05)
54 | ### Fixes
55 | * Remove unused variables from triggers map.
56 |
57 | ## [v1.0.0](https://github.com/3letteragency/terraform-vultr-k0s/releases/tag/v1.0.0) (2021-06-05)
58 | ### First Release
59 |
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_name" {
2 |   description = "A name for your cluster."
3 |   type = string
4 |   default = "default"
5 | }
6 |
7 | variable "provisioner_public_key" {
8 |   description = "SSH Public Key for Terraform provisioner access."
9 |   type = string
10 | }
11 |
12 | variable "extra_public_keys" {
13 |   description = "Extra (in addition to the provisioner key) SSH keys to add to the cluster nodes."
14 |   type = list(string)
15 |   default = []
16 | }
17 |
18 | variable "region" {
19 |   description = "Vultr deployment region."
20 |   type = string
21 |   default = "ewr"
22 | }
23 |
24 | variable "node_subnet" {
25 |   description = "Subnet to use for the Vultr Private Network."
26 |   type = string
27 |   default = "10.240.0.0/24"
28 | }
29 |
30 | variable "controller_count" {
31 |   description = "Number of Control Plane nodes."
32 |   type = number
33 |   default = 1
34 | }
35 |
36 | variable "ha_lb_algorithm" {
37 |   description = "Control Plane VLB balancing algorithm."
38 |   type = string
39 |   default = "roundrobin"
40 | }
41 |
42 | variable "ha_lb_health_response_timeout" {
43 |   description = "Control Plane VLB healthcheck response timeout."
44 |   type = number
45 |   default = 3
46 | }
47 |
48 | variable "ha_lb_health_unhealthy_threshold" {
49 |   description = "Control Plane VLB healthcheck unhealthy node threshold."
50 |   type = number
51 |   default = 1
52 | }
53 |
54 | variable "ha_lb_health_check_interval" {
55 |   description = "Control Plane VLB healthcheck interval."
56 |   type = number
57 |   default = 3
58 | }
59 |
60 | variable "ha_lb_health_healthy_threshold" {
61 |   description = "Control Plane VLB healthcheck healthy node threshold."
62 |   type = number
63 |   default = 2
64 | }
65 |
66 | variable "enable_ipv6" {
67 |   description = "Cluster IPv6 for future use. NOT CURRENTLY SUPPORTED."
68 |   type = bool
69 |   default = false
70 | }
71 |
72 | variable "activation_email" {
73 |   description = "Enable/disable cluster node activation emails."
74 |   type = bool
75 |   default = false
76 | }
77 |
78 | variable "ddos_protection" {
79 |   description = "Enable/disable cluster node DDoS Protection."
80 |   type = bool
81 |   default = false
82 | }
83 |
84 | variable "tag" {
85 |   description = "Cluster node tags."
86 |   type = string
87 |   default = ""
88 | }
89 |
90 | variable "worker_count" {
91 |   description = "Number of cluster workers to deploy."
92 |   type = number
93 |   default = 3
94 | }
95 |
96 | variable "pod_cidr" {
97 |   description = "Pod CIDR Subnet."
98 |   type = string
99 |   default = "10.244.0.0/16"
100 | }
101 |
102 | variable "svc_cidr" {
103 |   description = "Cluster Service CIDR subnet."
104 |   type = string
105 |   default = "10.96.0.0/12"
106 | }
107 |
108 | variable "pod_sec_policy" {
109 |   description = "K0s Pod Security Policy."
110 |   type = string
111 |   default = "00-k0s-privileged"
112 | }
113 |
114 | variable "konnectivity_version" {
115 |   description = "K0s Configuration Konnectivity version."
116 |   type = string
117 |   default = "v0.0.16"
118 | }
119 |
120 | variable "metrics_server_version" {
121 |   description = "K0s Configuration Kube Metrics Server version."
122 |   type = string
123 |   default = "v0.3.7"
124 | }
125 |
126 | variable "kube_proxy_version" {
127 |   description = "K0s Configuration Kube Proxy version."
128 |   type = string
129 |   default = "v1.21.3"
130 | }
131 |
132 | variable "core_dns_version" {
133 |   description = "K0s Configuration CoreDNS version."
134 |   type = string
135 |   default = "1.7.0"
136 | }
137 |
138 | variable "calico_version" {
139 |   description = "K0s Configuration Calico version."
140 |   type = string
141 |   default = "v3.18.1"
142 | }
143 |
144 | variable "cluster_os" {
145 |   description = "Cluster node OS."
146 |   type = string
147 |   default = "Debian 10 x64 (buster)"
148 | }
149 |
150 | variable "worker_plan" {
151 |   description = "Cluster worker node Vultr machine type/plan."
152 |   type = string
153 |   default = "vc2-2c-4gb"
154 | }
155 |
156 | variable "controller_plan" {
157 |   description = "Cluster controller node Vultr machine type/plan."
158 |   type = string
159 |   default = "vc2-2c-4gb"
160 | }
161 |
162 | variable "k0s_version" {
163 |   description = "K0s Configuration K0s version."
164 |   type = string
165 |   default = "v1.21.3+k0s.0"
166 | }
167 |
168 | variable "write_kubeconfig" {
169 |   description = "Write Kubeconfig locally."
170 |   type = bool
171 |   default = true
172 | }
173 |
174 | variable "cluster_vultr_api_key" {
175 |   description = "Vultr API Key for CCM and CSI."
176 |   type = string
177 |   sensitive = true
178 | }
179 |
180 | variable "vultr_ccm_version" {
181 |   description = "Vultr Cloud Controller Manager version."
182 |   type = string
183 |   default = "v0.2.0"
184 | }
185 |
186 | variable "vultr_csi_version" {
187 |   description = "Vultr Container Storage Interface (CSI) version."
188 |   type = string
189 |   default = "v0.1.1"
190 | }
191 |
192 | variable "control_plane_firewall_rules" {
193 |   description = "Control Plane VLB Firewall Rules."
194 |   type = list(object({
195 |     port = number
196 |     ip_type = string
197 |     source = string
198 |   }))
199 | }
200 |
201 | variable "allow_ssh" {
202 |   description = "Vultr Firewall Rule to allow SSH globally to all cluster nodes (control plane + workers)."
203 |   type = bool
204 |   default = true
205 | }
206 |
207 | variable "helm_repositories" {
208 |   type = list(map(any))
209 |   default = []
210 | }
211 |
212 | variable "helm_charts" {
213 |   type = list(map(any))
214 |   default = []
215 | }
216 |
217 | variable "vultr_csi_image" {
218 |   type = string
219 |   default = "vultr/vultr-csi"
220 | }
221 |
222 | variable "calico_mode" {
223 |   type = string
224 |   default = "bird"
225 | }
226 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Condor
2 |
3 | [Terraform Module Registry Docs](https://registry.terraform.io/modules/vultr/condor/vultr/latest)
4 |
5 | ## Table of Contents
6 | * [Requirements](#requirements)
7 | * [Quick Start](#quick-start)
8 | * [Firewall Configuration](#firewall-configuration)
9 |   * [Control Plane HA VLB Firewall](#control-plane-ha-vlb-firewall)
10 |   * [Cluster Nodes Vultr Firewall](#cluster-nodes-vultr-firewall)
11 | * [Extensions](#extensions)
12 |   * [Helm](#helm)
13 |   * [Manifest Bundles](#manifest-bundles)
14 | * [Limitations](#limitations)
15 |
16 | ## Requirements
17 | * A funded Vultr account and an API key, which should be configured as an environment variable to be consumed by the Terraform provider:
18 | ```sh
19 | export VULTR_API_KEY=""
20 | ```
21 | * The [k0sctl](https://github.com/k0sproject/k0sctl) executable must be installed and in your executable path.
22 | * A configured ssh-agent, e.g.:
23 | ```sh
24 | ssh-add ~/.ssh/id_rsa
25 | ```
26 |
27 | ## Cloud Addons
28 | * Installs the [Vultr CCM](https://github.com/vultr/vultr-cloud-controller-manager)
29 | * Installs the [Vultr CSI](https://github.com/vultr/vultr-csi)
30 |
31 | ## Quick Start
32 | Usage and input details can be found in the [Terraform Module Registry Docs](https://registry.terraform.io/modules/vultr/condor/vultr/latest), or use the quickstart below.
33 |
34 | 1) Create a `main.tf` file with the [Required Inputs](https://registry.terraform.io/modules/vultr/condor/vultr/latest?tab=inputs#required-inputs):
35 | ``` hcl
36 | # main.tf
37 |
38 | module "condor" {
39 |   source = "vultr/condor/vultr"
40 |   version = "2.0.0"
41 |   provisioner_public_key = chomp(file("~/.ssh/id_rsa.pub"))
42 |   cluster_vultr_api_key = var.cluster_vultr_api_key
43 |   control_plane_firewall_rules = [
44 |     {
45 |       port = 6443
46 |       ip_type = "v4"
47 |       source = "0.0.0.0/0"
48 |     }
49 |   ]
50 | }
51 | ```
52 | * The Control Plane Firewall rule in this example exposes the Kubernetes API globally; it is recommended that you configure a more restrictive rule or rules on production clusters.
53 | * Passing the Cluster API Key as plain text is not recommended for anything beyond testing; use an environment variable as described [here](https://www.terraform.io/docs/cli/config/environment-variables.html#tf_var_name).
54 |
55 | 2) Configure any [Optional Inputs](https://registry.terraform.io/modules/vultr/condor/vultr/latest?tab=inputs#optional-inputs) if you wish to change from the defaults.
56 |
57 | 3) Deploy
58 | ``` sh
59 | $ terraform init && terraform apply
60 | ```
61 |
62 | 4) The Admin Kubeconfig is written locally to your working directory by default as `admin-<terraform workspace>.conf`. This may not be desirable in some scenarios, e.g. running Terraform in automation (CI/CD pipelines). If you wish to change this, set the `write_kubeconfig` variable to false.
If you wish to retrieve the kubeconfig manually, you can run the following after your cluster has been deployed:
63 | ``` sh
64 | $ k0sctl kubeconfig > /path/to/admin.conf
65 | ```
66 |
67 | 5) Verify cluster functionality
68 | ``` sh
69 | kubectl --kubeconfig admin.conf get no
70 | kubectl --kubeconfig admin.conf get po -n kube-system
71 | ```
72 |
73 | ## Firewall Configuration
74 | ### Control Plane HA VLB Firewall
75 | The Control Plane LB Firewall is configured by default to allow only what is needed by the cluster, as described in the [K0s Networking Docs](https://docs.k0sproject.io/v1.21.1+k0s.0/networking/#required-ports-and-protocols). The Kubernetes API will not be accessible without configuring an additional rule or rules (as shown in the quickstart example) via the `control_plane_firewall_rules` input variable. E.g.:
76 | ``` hcl
77 | control_plane_firewall_rules = [
78 |   {
79 |     port = 6443
80 |     ip_type = "v4"
81 |     source = "0.0.0.0/0"
82 |   }
83 | ]
84 | ```
85 | As also stated in the quickstart, this example rule exposes the Kubernetes API globally; your rules should be more restrictive for production clusters.
86 |
87 | ### Cluster Nodes Vultr Firewall
88 | The cluster nodes (control plane and workers) Vultr Firewall defaults to allowing only SSH globally. This is generally acceptable; however, if you would like to restrict access further, you may disable this rule by setting the `allow_ssh` input variable to `false` and then configure the desired rule or rules outside of this module, using the `cluster_firewall_group_id` output in your rules.
89 |
90 | ## Extensions
91 | ### Helm
92 | Helm repositories and charts may be configured/deployed during initial cluster init via the `helm_repositories` and `helm_charts` variables. Note that a `namespace` is required in each chart definition for K0s. Example:
93 | ``` hcl
94 | helm_repositories = [
95 |   {
96 |     name = "argo"
97 |     url = "https://argoproj.github.io/argo-helm"
98 |   }
99 | ]
100 | helm_charts = [
101 |   {
102 |     name = "argocd"
103 |     chartname = "argo/argo-cd"
104 |     version = "3.6.8"
105 |     namespace = "argocd"
106 |   }
107 | ]
108 | ```
109 | Please see the [Helm Chart Deployer](https://docs.k0sproject.io/v1.21.3+k0s.0/helm-charts/#helm-charts) docs for a comprehensive list of field/parameter values and further details. Note that this feature entails [Limitations](#limitations).
110 |
111 | ### Manifest Bundles
112 | You may deploy any Kubernetes manifests automatically with the [K0s Manifest Deployer](https://docs.k0sproject.io/v1.21.1+k0s.0/manifests/#manifest-deployer) by placing your manifests in the `/var/lib/k0s/manifests` directory. Doing so via this module is not supported; however, you may use the resulting `controller_ips` module output as arguments to a separate module that copies your manifests to the specified directory (or, as stated in the linked K0s docs, a "stack" subdirectory).
113 |
114 | ## Limitations
115 | * Shrinking of the Control Plane is not supported, only growing. You will need to manually run `k0s etcd leave` on all Control Plane nodes with index > 0 prior to shrinking the `controller_count`. An initial attempt was made to implement this in a destroy-time provisioner; however, it caused issues when running `terraform destroy` to destroy the entire plan. This may be revisited at a later date.
116 | * Etcd running on the Control Plane currently goes over the public network due to k0sctl configuration limitations - each controller's respective `spec.k0s.spec.storage.peerAddress` parameter would require a different value.
The Cluster Vultr Firewall allows only Control plane nodes to access the Etcd port(`2380`). Will likely revisit this or look into a PR to K0sctl so that the private network may be used. 117 | * Helm Charts/Repositories are not removed from your cluster if removed from your Terraform configuration. The manifests must be manually removed from each controller in the `/var/lib/k0s/manifests/helm/` directory, and the resources/namespace manually deleted. This makes it less than ideal for continued deployments to your cluster, but great for bootstrapping core applications. A tool such as ArgoCD as used in the Helm example is recommended for long term deployments to your cluster. 118 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vultr = { 4 | source = "vultr/vultr" 5 | version = "2.3.2" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | cluster_name = "${var.cluster_name}-${random_id.cluster.hex}" 12 | public_keys = concat([vultr_ssh_key.provisioner.id], vultr_ssh_key.extra_public_keys.*.id) 13 | csi_provisioner_version = "v2.0.4" 14 | csi_attacher_version = "v3.0.2" 15 | csi_node_driver_registrar_version = "v2.0.1" 16 | k0sctl_controllers = [ 17 | for host in vultr_instance.control_plane : 18 | { 19 | role = "controller" 20 | installFlags = [ 21 | "--enable-cloud-provider=true" 22 | ] 23 | privateAddress = host.internal_ip 24 | ssh = { 25 | address = host.main_ip 26 | user = "root" 27 | port = 22 28 | } 29 | } 30 | ] 31 | k0sctl_workers = [ 32 | for host in vultr_instance.worker : 33 | { 34 | role = "worker" 35 | installFlags = [ 36 | "--enable-cloud-provider=true" 37 | ] 38 | privateAddress = host.internal_ip 39 | ssh = { 40 | address = host.main_ip 41 | user = "root" 42 | port = 22 43 | } 44 | } 45 | ] 46 | k0sctl_conf = { 47 | apiVersion = "k0sctl.k0sproject.io/v1beta1" 48 | kind = "Cluster" 49 | metadata = { 50 | name = local.cluster_name 51 | } 52 | spec = { 53 | hosts = concat(local.k0sctl_controllers, local.k0sctl_workers) 54 | k0s = { 55 | version = var.k0s_version 56 | config = { 57 | apiVersion = "k0s.k0sproject.io/v1beta1" 58 | kind = "Cluster" 59 | metadata = { 60 | name = local.cluster_name 61 | } 62 | spec = { 63 | extensions = { 64 | helm = { 65 | repositories = var.helm_repositories 66 | charts = var.helm_charts 67 | } 68 | } 69 | telemetry = { 70 | enabled = false 71 | } 72 | api = { 73 | port = 6443 74 | k0sApiPort = 9443 75 | externalAddress = vultr_load_balancer.control_plane_ha.ipv4 76 | address = vultr_load_balancer.control_plane_ha.ipv4 77 | sans = [ 78 | vultr_load_balancer.control_plane_ha.ipv4 79 | ] 80 | } 81 | network = { 82 | podCIDR = var.pod_cidr 83 | serviceCIDR = var.svc_cidr 84 | "provider" = "calico" 85 | calico = { 86 | mode = var.calico_mode 87 | } 88 | } 89 | podSecurityPolicy = { 90 | defaultPolicy = var.pod_sec_policy 91 | } 92 | images = { 93 | konnectivity = { 94 | image = "us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent" 95 | version = var.konnectivity_version 96 | } 97 | metricsserver = { 98 | image = "gcr.io/k8s-staging-metrics-server/metrics-server" 99 | version = var.metrics_server_version 100 | } 101 | kubeproxy = { 102 | image = "k8s.gcr.io/kube-proxy" 103 | version = var.kube_proxy_version 104 | } 105 | coredns = { 106 | image = "docker.io/coredns/coredns" 107 | version = var.core_dns_version 108 | } 109 | calico = { 110 | cni = { 111 | image = "calico/cni" 112 | version = 
var.calico_version 113 | } 114 | } 115 | } 116 | } 117 | } 118 | } 119 | } 120 | } 121 | config_sha256sum = sha256(tostring(jsonencode(local.k0sctl_conf))) 122 | } 123 | 124 | data "vultr_os" "cluster" { 125 | filter { 126 | name = "name" 127 | values = [var.cluster_os] 128 | } 129 | } 130 | 131 | resource "random_id" "cluster" { 132 | byte_length = 8 133 | } 134 | 135 | resource "vultr_ssh_key" "provisioner" { 136 | name = "Provisioner public key for k0s cluster ${random_id.cluster.hex}" 137 | ssh_key = var.provisioner_public_key 138 | } 139 | 140 | resource "vultr_ssh_key" "extra_public_keys" { 141 | count = length(var.extra_public_keys) 142 | name = "Public key for k0s cluster ${random_id.cluster.hex}" 143 | ssh_key = var.extra_public_keys[count.index] 144 | } 145 | 146 | resource "vultr_private_network" "cluster" { 147 | description = "Private Network for k0s cluster ${random_id.cluster.hex}" 148 | region = var.region 149 | v4_subnet = element(split("/", var.node_subnet), 0) 150 | v4_subnet_mask = element(split("/", var.node_subnet), 1) 151 | } 152 | 153 | resource "vultr_load_balancer" "control_plane_ha" { 154 | region = var.region 155 | label = "HA Control Plane Load Balancer for k0s cluster ${random_id.cluster.hex}" 156 | balancing_algorithm = var.ha_lb_algorithm 157 | private_network = vultr_private_network.cluster.id 158 | 159 | forwarding_rules { 160 | frontend_protocol = "tcp" 161 | frontend_port = 6443 162 | backend_protocol = "tcp" 163 | backend_port = 6443 164 | } 165 | 166 | dynamic "firewall_rules" { 167 | for_each = vultr_instance.worker 168 | iterator = instance 169 | content { 170 | port = 6443 171 | ip_type = "v4" 172 | source = "${instance.value["main_ip"]}/32" 173 | } 174 | } 175 | 176 | forwarding_rules { 177 | frontend_protocol = "tcp" 178 | frontend_port = 8132 179 | backend_protocol = "tcp" 180 | backend_port = 8132 181 | } 182 | 183 | dynamic "firewall_rules" { 184 | for_each = vultr_instance.worker 185 | iterator = instance 186 | content { 187 | port = 8132 188 | ip_type = "v4" 189 | source = "${instance.value["main_ip"]}/32" 190 | } 191 | } 192 | 193 | forwarding_rules { 194 | frontend_protocol = "tcp" 195 | frontend_port = 8133 196 | backend_protocol = "tcp" 197 | backend_port = 8133 198 | } 199 | 200 | dynamic "firewall_rules" { 201 | for_each = vultr_instance.worker 202 | iterator = instance 203 | content { 204 | port = 8133 205 | ip_type = "v4" 206 | source = "${instance.value["main_ip"]}/32" 207 | } 208 | } 209 | 210 | forwarding_rules { 211 | frontend_protocol = "tcp" 212 | frontend_port = 9443 213 | backend_protocol = "tcp" 214 | backend_port = 9443 215 | } 216 | 217 | dynamic "firewall_rules" { 218 | for_each = vultr_instance.control_plane 219 | iterator = instance 220 | content { 221 | port = 9443 222 | ip_type = "v4" 223 | source = "${instance.value["main_ip"]}/32" 224 | } 225 | } 226 | 227 | dynamic "firewall_rules" { 228 | for_each = var.control_plane_firewall_rules 229 | iterator = rule 230 | content { 231 | port = rule.value["port"] 232 | ip_type = rule.value["ip_type"] 233 | source = rule.value["source"] 234 | } 235 | } 236 | 237 | health_check { 238 | port = "6443" 239 | protocol = "tcp" 240 | response_timeout = var.ha_lb_health_response_timeout 241 | unhealthy_threshold = var.ha_lb_health_unhealthy_threshold 242 | check_interval = var.ha_lb_health_check_interval 243 | healthy_threshold = var.ha_lb_health_healthy_threshold 244 | } 245 | 246 | attached_instances = vultr_instance.control_plane.*.id 247 | } 248 | 249 | resource 
"vultr_firewall_group" "cluster" { 250 | description = "Firewall group for k0s cluster ${random_id.cluster.hex}" 251 | } 252 | 253 | resource "vultr_firewall_rule" "ssh" { 254 | count = var.allow_ssh ? 1 : 0 255 | firewall_group_id = vultr_firewall_group.cluster.id 256 | protocol = "tcp" 257 | ip_type = "v4" 258 | subnet = "0.0.0.0" 259 | subnet_size = 0 260 | port = "22" 261 | notes = "Allow SSH to all cluster nodes globally." 262 | } 263 | 264 | resource "vultr_firewall_rule" "etcd" { 265 | count = var.controller_count 266 | firewall_group_id = vultr_firewall_group.cluster.id 267 | protocol = "tcp" 268 | ip_type = "v4" 269 | subnet = vultr_instance.control_plane[count.index].main_ip 270 | subnet_size = 32 271 | port = "2380" 272 | notes = "Allow Etcd for Control Plane members." 273 | } 274 | 275 | resource "vultr_instance" "control_plane" { 276 | count = var.controller_count 277 | plan = var.controller_plan 278 | hostname = "${local.cluster_name}-controller-${count.index}" 279 | label = "${local.cluster_name}-controller-${count.index}" 280 | region = var.region 281 | os_id = data.vultr_os.cluster.id 282 | firewall_group_id = vultr_firewall_group.cluster.id 283 | private_network_ids = [vultr_private_network.cluster.id] 284 | ssh_key_ids = local.public_keys 285 | enable_ipv6 = var.enable_ipv6 286 | activation_email = var.activation_email 287 | ddos_protection = var.ddos_protection 288 | tag = var.tag 289 | 290 | connection { 291 | type = "ssh" 292 | user = "root" 293 | host = self.main_ip 294 | } 295 | 296 | provisioner "file" { 297 | source = "${path.module}/scripts/provision.sh" 298 | destination = "/tmp/provision.sh" 299 | } 300 | 301 | provisioner "remote-exec" { 302 | inline = [ 303 | "chmod +x /tmp/provision.sh", 304 | "/tmp/provision.sh ${self.internal_ip}", 305 | "rm -f /tmp/provision.sh" 306 | ] 307 | } 308 | } 309 | 310 | resource "vultr_instance" "worker" { 311 | count = var.worker_count 312 | plan = var.worker_plan 313 | hostname = "${local.cluster_name}-worker-${count.index}" 314 | label = "${local.cluster_name}-worker-${count.index}" 315 | region = var.region 316 | os_id = data.vultr_os.cluster.id 317 | firewall_group_id = vultr_firewall_group.cluster.id 318 | private_network_ids = [vultr_private_network.cluster.id] 319 | ssh_key_ids = local.public_keys 320 | enable_ipv6 = var.enable_ipv6 321 | activation_email = var.activation_email 322 | ddos_protection = var.ddos_protection 323 | tag = var.tag 324 | 325 | connection { 326 | type = "ssh" 327 | user = "root" 328 | host = self.main_ip 329 | } 330 | 331 | provisioner "file" { 332 | source = "${path.module}/scripts/provision.sh" 333 | destination = "/tmp/provision.sh" 334 | } 335 | 336 | provisioner "remote-exec" { 337 | inline = [ 338 | "chmod +x /tmp/provision.sh", 339 | "/tmp/provision.sh ${self.internal_ip}", 340 | "rm -f /tmp/provision.sh" 341 | ] 342 | } 343 | } 344 | 345 | resource "null_resource" "k0s" { 346 | depends_on = [ 347 | vultr_load_balancer.control_plane_ha, 348 | vultr_instance.control_plane, 349 | vultr_instance.worker 350 | ] 351 | 352 | triggers = { 353 | controller_count = var.controller_count 354 | worker_count = var.worker_count 355 | config = local.config_sha256sum 356 | } 357 | 358 | provisioner "local-exec" { 359 | command = <<-EOT 360 | cat <<-EOF > k0sctl.yaml 361 | ${yamlencode(local.k0sctl_conf)} 362 | EOF 363 | k0sctl apply 364 | 365 | EOT 366 | } 367 | } 368 | 369 | resource "null_resource" "vultr_extensions" { 370 | count = var.controller_count 371 | 372 | triggers = { 373 | api_key = 
var.cluster_vultr_api_key 374 | ccm_version = var.vultr_ccm_version 375 | csi_version = var.vultr_csi_version 376 | } 377 | 378 | connection { 379 | type = "ssh" 380 | user = "root" 381 | host = vultr_instance.control_plane[count.index].main_ip 382 | } 383 | 384 | provisioner "remote-exec" { 385 | inline = [ 386 | "mkdir -p /var/lib/k0s/manifests/vultr" 387 | ] 388 | } 389 | 390 | provisioner "file" { 391 | content = <<-EOT 392 | apiVersion: v1 393 | kind: Secret 394 | metadata: 395 | name: vultr-ccm 396 | namespace: kube-system 397 | stringData: 398 | api-key: "${var.cluster_vultr_api_key}" 399 | region: "${var.region}" 400 | --- 401 | apiVersion: v1 402 | kind: Secret 403 | metadata: 404 | name: vultr-csi 405 | namespace: kube-system 406 | stringData: 407 | api-key: "${var.cluster_vultr_api_key}" 408 | --- 409 | EOT 410 | destination = "/var/lib/k0s/manifests/vultr/vultr-api-key.yaml" 411 | } 412 | 413 | provisioner "file" { 414 | content = <<-EOT 415 | apiVersion: v1 416 | kind: ServiceAccount 417 | metadata: 418 | name: vultr-ccm 419 | namespace: kube-system 420 | --- 421 | apiVersion: rbac.authorization.k8s.io/v1 422 | kind: ClusterRole 423 | metadata: 424 | annotations: 425 | rbac.authorization.kubernetes.io/autoupdate: "true" 426 | name: system:vultr-ccm 427 | rules: 428 | - apiGroups: 429 | - "" 430 | resources: 431 | - events 432 | verbs: 433 | - create 434 | - patch 435 | - update 436 | - apiGroups: 437 | - "" 438 | resources: 439 | - nodes 440 | verbs: 441 | - '*' 442 | - apiGroups: 443 | - "" 444 | resources: 445 | - nodes/status 446 | verbs: 447 | - patch 448 | - apiGroups: 449 | - "" 450 | resources: 451 | - services 452 | verbs: 453 | - list 454 | - patch 455 | - update 456 | - watch 457 | - apiGroups: 458 | - "" 459 | resources: 460 | - services/status 461 | verbs: 462 | - list 463 | - patch 464 | - update 465 | - watch 466 | - apiGroups: 467 | - "" 468 | resources: 469 | - serviceaccounts 470 | verbs: 471 | - create 472 | - get 473 | - apiGroups: 474 | - "" 475 | resources: 476 | - persistentvolumes 477 | verbs: 478 | - get 479 | - list 480 | - update 481 | - watch 482 | - apiGroups: 483 | - "" 484 | resources: 485 | - endpoints 486 | verbs: 487 | - create 488 | - get 489 | - list 490 | - watch 491 | - update 492 | - apiGroups: 493 | - coordination.k8s.io 494 | resources: 495 | - leases 496 | verbs: 497 | - create 498 | - get 499 | - list 500 | - watch 501 | - update 502 | - apiGroups: 503 | - "" 504 | resources: 505 | - secrets 506 | verbs: 507 | - get 508 | - list 509 | - watch 510 | --- 511 | kind: ClusterRoleBinding 512 | apiVersion: rbac.authorization.k8s.io/v1 513 | metadata: 514 | name: system:vultr-ccm 515 | roleRef: 516 | apiGroup: rbac.authorization.k8s.io 517 | kind: ClusterRole 518 | name: system:vultr-ccm 519 | subjects: 520 | - kind: ServiceAccount 521 | name: vultr-ccm 522 | namespace: kube-system 523 | --- 524 | apiVersion: apps/v1 525 | kind: Deployment 526 | metadata: 527 | name: vultr-ccm 528 | labels: 529 | app: vultr-ccm 530 | namespace: kube-system 531 | spec: 532 | replicas: 1 533 | selector: 534 | matchLabels: 535 | app: vultr-ccm 536 | template: 537 | metadata: 538 | labels: 539 | app: vultr-ccm 540 | spec: 541 | serviceAccountName: vultr-ccm 542 | tolerations: 543 | - key: "CriticalAddonsOnly" 544 | operator: "Exists" 545 | - key: "node.cloudprovider.kubernetes.io/uninitialized" 546 | value: "true" 547 | effect: "NoSchedule" 548 | - key: node.kubernetes.io/not-ready 549 | operator: Exists 550 | effect: NoSchedule 551 | - key: 
node.kubernetes.io/unreachable 552 | operator: Exists 553 | effect: NoSchedule 554 | hostNetwork: true 555 | containers: 556 | - image: vultr/vultr-cloud-controller-manager:${var.vultr_ccm_version} 557 | imagePullPolicy: Always 558 | name: vultr-cloud-controller-manager 559 | command: 560 | - "/vultr-cloud-controller-manager" 561 | - "--cloud-provider=vultr" 562 | - "--allow-untagged-cloud=true" 563 | - "--authentication-skip-lookup=true" 564 | - "--v=3" 565 | env: 566 | - name: VULTR_API_KEY 567 | valueFrom: 568 | secretKeyRef: 569 | name: vultr-ccm 570 | key: api-key 571 | EOT 572 | destination = "/var/lib/k0s/manifests/vultr/vultr-ccm-${var.vultr_ccm_version}.yaml" 573 | } 574 | 575 | provisioner "file" { 576 | content = <<-EOT 577 | #################### 578 | ### Storage Classes 579 | #################### 580 | apiVersion: storage.k8s.io/v1beta1 581 | kind: CSIDriver 582 | metadata: 583 | name: block.csi.vultr.com 584 | spec: 585 | attachRequired: true 586 | podInfoOnMount: true 587 | 588 | --- 589 | kind: StorageClass 590 | apiVersion: storage.k8s.io/v1 591 | metadata: 592 | name: vultr-block-storage 593 | namespace: kube-system 594 | annotations: 595 | storageclass.kubernetes.io/is-default-class: "true" 596 | provisioner: block.csi.vultr.com 597 | 598 | --- 599 | kind: StorageClass 600 | apiVersion: storage.k8s.io/v1 601 | metadata: 602 | name: vultr-block-storage-retain 603 | namespace: kube-system 604 | provisioner: block.csi.vultr.com 605 | reclaimPolicy: Retain 606 | 607 | ################### 608 | ### CSI Controller 609 | ################### 610 | --- 611 | kind: StatefulSet 612 | apiVersion: apps/v1 613 | metadata: 614 | name: csi-vultr-controller 615 | namespace: kube-system 616 | spec: 617 | serviceName: "csi-vultr" 618 | replicas: 1 619 | selector: 620 | matchLabels: 621 | app: csi-vultr-controller 622 | template: 623 | metadata: 624 | labels: 625 | app: csi-vultr-controller 626 | role: csi-vultr 627 | spec: 628 | serviceAccountName: csi-vultr-controller-sa 629 | containers: 630 | - name: csi-provisioner 631 | image: quay.io/k8scsi/csi-provisioner:${local.csi_provisioner_version} 632 | args: 633 | - "--volume-name-prefix=pvc" 634 | - "--volume-name-uuid-length=16" 635 | - "--csi-address=$(ADDRESS)" 636 | - "--v=5" 637 | - "--default-fstype=ext4" 638 | env: 639 | - name: ADDRESS 640 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 641 | imagePullPolicy: "Always" 642 | volumeMounts: 643 | - name: socket-dir 644 | mountPath: /var/lib/csi/sockets/pluginproxy/ 645 | - name: csi-attacher 646 | image: quay.io/k8scsi/csi-attacher:${local.csi_attacher_version} 647 | args: 648 | - "--v=5" 649 | - "--csi-address=$(ADDRESS)" 650 | env: 651 | - name: ADDRESS 652 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 653 | imagePullPolicy: "Always" 654 | volumeMounts: 655 | - name: socket-dir 656 | mountPath: /var/lib/csi/sockets/pluginproxy/ 657 | - name: csi-vultr-plugin 658 | image: ${var.vultr_csi_image}:${var.vultr_csi_version} 659 | args: 660 | - "--endpoint=$(CSI_ENDPOINT)" 661 | - "--token=$(VULTR_API_KEY)" 662 | env: 663 | - name: CSI_ENDPOINT 664 | value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock 665 | - name: VULTR_API_KEY 666 | valueFrom: 667 | secretKeyRef: 668 | name: vultr-csi 669 | key: api-key 670 | imagePullPolicy: "Always" 671 | volumeMounts: 672 | - name: socket-dir 673 | mountPath: /var/lib/csi/sockets/pluginproxy/ 674 | volumes: 675 | - name: socket-dir 676 | emptyDir: { } 677 | 678 | --- 679 | apiVersion: v1 680 | kind: ServiceAccount 681 | metadata: 682 | 
name: csi-vultr-controller-sa 683 | namespace: kube-system 684 | 685 | ## Attacher Role + Binding 686 | --- 687 | kind: ClusterRole 688 | apiVersion: rbac.authorization.k8s.io/v1 689 | metadata: 690 | name: csi-vultr-attacher-role 691 | namespace: kube-system 692 | rules: 693 | - apiGroups: [ "" ] 694 | resources: [ "persistentvolumes" ] 695 | verbs: [ "get", "list", "watch", "update", "patch" ] 696 | - apiGroups: [ "" ] 697 | resources: [ "nodes" ] 698 | verbs: [ "get", "list", "watch" ] 699 | - apiGroups: [ "storage.k8s.io" ] 700 | resources: [ "csinodes" ] 701 | verbs: [ "get", "list", "watch" ] 702 | - apiGroups: [ "storage.k8s.io" ] 703 | resources: [ "volumeattachments" ] 704 | verbs: [ "get", "list", "watch", "update", "patch" ] 705 | - apiGroups: [ "storage.k8s.io" ] 706 | resources: [ "volumeattachments/status" ] 707 | verbs: [ "patch" ] 708 | 709 | --- 710 | kind: ClusterRoleBinding 711 | apiVersion: rbac.authorization.k8s.io/v1 712 | metadata: 713 | name: csi-controller-attacher-binding 714 | namespace: kube-system 715 | subjects: 716 | - kind: ServiceAccount 717 | name: csi-vultr-controller-sa 718 | namespace: kube-system 719 | roleRef: 720 | kind: ClusterRole 721 | name: csi-vultr-attacher-role 722 | apiGroup: rbac.authorization.k8s.io 723 | 724 | ## Provisioner Role + Binding 725 | --- 726 | kind: ClusterRole 727 | apiVersion: rbac.authorization.k8s.io/v1 728 | metadata: 729 | name: csi-vultr-provisioner-role 730 | namespace: kube-system 731 | rules: 732 | - apiGroups: [ "" ] 733 | resources: [ "secrets" ] 734 | verbs: [ "get", "list" ] 735 | - apiGroups: [ "" ] 736 | resources: [ "persistentvolumes" ] 737 | verbs: [ "get", "list", "watch", "create", "delete" ] 738 | - apiGroups: [ "" ] 739 | resources: [ "persistentvolumeclaims" ] 740 | verbs: [ "get", "list", "watch", "update" ] 741 | - apiGroups: [ "storage.k8s.io" ] 742 | resources: [ "storageclasses" ] 743 | verbs: [ "get", "list", "watch" ] 744 | - apiGroups: [ "storage.k8s.io" ] 745 | resources: [ "csinodes" ] 746 | verbs: [ "get", "list", "watch" ] 747 | - apiGroups: [ "" ] 748 | resources: [ "events" ] 749 | verbs: [ "list", "watch", "create", "update", "patch" ] 750 | - apiGroups: [ "" ] 751 | resources: [ "nodes" ] 752 | verbs: [ "get", "list", "watch" ] 753 | - apiGroups: [ "storage.k8s.io" ] 754 | resources: [ "volumeattachments" ] 755 | verbs: [ "get", "list", "watch" ] 756 | 757 | --- 758 | kind: ClusterRoleBinding 759 | apiVersion: rbac.authorization.k8s.io/v1 760 | metadata: 761 | name: csi-controller-provisioner-binding 762 | namespace: kube-system 763 | subjects: 764 | - kind: ServiceAccount 765 | name: csi-vultr-controller-sa 766 | namespace: kube-system 767 | roleRef: 768 | kind: ClusterRole 769 | name: csi-vultr-provisioner-role 770 | apiGroup: rbac.authorization.k8s.io 771 | 772 | 773 | ############ 774 | ## CSI Node 775 | ############ 776 | --- 777 | kind: DaemonSet 778 | apiVersion: apps/v1 779 | metadata: 780 | name: csi-vultr-node 781 | namespace: kube-system 782 | spec: 783 | selector: 784 | matchLabels: 785 | app: csi-vultr-node 786 | template: 787 | metadata: 788 | labels: 789 | app: csi-vultr-node 790 | role: csi-vultr 791 | spec: 792 | serviceAccountName: csi-vultr-node-sa 793 | hostNetwork: true 794 | containers: 795 | - name: driver-registrar 796 | image: quay.io/k8scsi/csi-node-driver-registrar:${local.csi_node_driver_registrar_version} 797 | args: 798 | - "--v=5" 799 | - "--csi-address=$(ADDRESS)" 800 | - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" 801 | env: 802 | - name: ADDRESS 
803 | value: /csi/csi.sock 804 | - name: DRIVER_REG_SOCK_PATH 805 | value: /var/lib/k0s/kubelet/plugins/block.csi.vultr.com/csi.sock 806 | - name: KUBE_NODE_NAME 807 | valueFrom: 808 | fieldRef: 809 | fieldPath: spec.nodeName 810 | volumeMounts: 811 | - name: plugin-dir 812 | mountPath: /csi/ 813 | - name: registration-dir 814 | mountPath: /registration/ 815 | - name: csi-vultr-plugin 816 | image: ${var.vultr_csi_image}:${var.vultr_csi_version} 817 | args: 818 | - "--endpoint=$(CSI_ENDPOINT)" 819 | env: 820 | - name: CSI_ENDPOINT 821 | value: unix:///csi/csi.sock 822 | imagePullPolicy: "Always" 823 | securityContext: 824 | privileged: true 825 | capabilities: 826 | add: [ "SYS_ADMIN" ] 827 | allowPrivilegeEscalation: true 828 | volumeMounts: 829 | - name: plugin-dir 830 | mountPath: /csi 831 | - name: pods-mount-dir 832 | mountPath: /var/lib/k0s/kubelet 833 | mountPropagation: "Bidirectional" 834 | - mountPath: /dev 835 | name: device-dir 836 | volumes: 837 | - name: registration-dir 838 | hostPath: 839 | path: /var/lib/k0s/kubelet/plugins_registry/ 840 | type: DirectoryOrCreate 841 | - name: kubelet-dir 842 | hostPath: 843 | path: /var/lib/k0s/kubelet 844 | type: Directory 845 | - name: plugin-dir 846 | hostPath: 847 | path: /var/lib/k0s/kubelet/plugins/block.csi.vultr.com 848 | type: DirectoryOrCreate 849 | - name: pods-mount-dir 850 | hostPath: 851 | path: /var/lib/k0s/kubelet 852 | type: Directory 853 | - name: device-dir 854 | hostPath: 855 | path: /dev 856 | - name: udev-rules-etc 857 | hostPath: 858 | path: /etc/udev 859 | type: Directory 860 | - name: udev-rules-lib 861 | hostPath: 862 | path: /lib/udev 863 | type: Directory 864 | - name: udev-socket 865 | hostPath: 866 | path: /run/udev 867 | type: Directory 868 | - name: sys 869 | hostPath: 870 | path: /sys 871 | type: Directory 872 | 873 | --- 874 | apiVersion: v1 875 | kind: ServiceAccount 876 | metadata: 877 | name: csi-vultr-node-sa 878 | namespace: kube-system 879 | 880 | --- 881 | kind: ClusterRoleBinding 882 | apiVersion: rbac.authorization.k8s.io/v1 883 | metadata: 884 | name: driver-registrar-binding 885 | namespace: kube-system 886 | subjects: 887 | - kind: ServiceAccount 888 | name: csi-vultr-node-sa 889 | namespace: kube-system 890 | roleRef: 891 | kind: ClusterRole 892 | name: csi-vultr-node-driver-registrar-role 893 | apiGroup: rbac.authorization.k8s.io 894 | 895 | --- 896 | kind: ClusterRole 897 | apiVersion: rbac.authorization.k8s.io/v1 898 | metadata: 899 | name: csi-vultr-node-driver-registrar-role 900 | namespace: kube-system 901 | rules: 902 | - apiGroups: [ "" ] 903 | resources: [ "events" ] 904 | verbs: [ "get", "list", "watch", "create", "update", "patch" ] 905 | EOT 906 | destination = "/var/lib/k0s/manifests/vultr/vultr-csi-latest.yaml" 907 | } 908 | } 909 | 910 | resource "null_resource" "kubeconfig" { 911 | depends_on = [ 912 | null_resource.k0s 913 | ] 914 | 915 | triggers = { 916 | cluster = null_resource.k0s.id 917 | } 918 | 919 | count = var.write_kubeconfig ? 1 : 0 920 | 921 | provisioner "local-exec" { 922 | command = "k0sctl kubeconfig > admin-${terraform.workspace}.conf" 923 | } 924 | } 925 | --------------------------------------------------------------------------------
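For reference, a minimal end-to-end run of the bundled test configuration might look like the following. This is a sketch rather than repository content; it assumes the default Terraform workspace, so the kubeconfig written by the module is named `admin-default.conf`, and it assumes your SSH key is already loaded into ssh-agent as described in the README:

```sh
cd test

# Provide the required secret via the TF_VAR_ convention rather than plain text,
# and the provider credential expected by the Vultr Terraform provider.
export TF_VAR_cluster_vultr_api_key="<your Vultr API key>"
export VULTR_API_KEY="<your Vultr API key>"

terraform init
terraform apply

# write_kubeconfig defaults to true, so the admin kubeconfig lands in the working directory.
kubectl --kubeconfig admin-default.conf get nodes
```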