├── LICENSE
├── README.md
├── apiserver.tf
├── bootstrap.tf
├── butane
│   └── controller.yaml
├── controllers.tf
├── image.tf
├── ingress.tf
├── network.tf
├── outputs.tf
├── ssh.tf
├── variables.tf
├── versions.tf
├── workers.tf
└── workers
    ├── butane
    │   └── worker.yaml
    ├── image.tf
    ├── outputs.tf
    ├── target_pool.tf
    ├── variables.tf
    ├── versions.tf
    └── workers.tf
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 Typhoon Authors
4 | Copyright (c) 2017 Dalton Hubble
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 |
24 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Typhoon
2 |
3 | Typhoon is a minimal and free Kubernetes distribution.
4 |
5 | * Minimal, stable base Kubernetes distribution
6 | * Declarative infrastructure and configuration
7 | * Free (freedom and cost) and privacy-respecting
8 | * Practical for labs, datacenters, and clouds
9 |
10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components.
11 |
12 | ## Features
13 |
14 | * Kubernetes v1.26.0 (upstream)
15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/flatcar-linux/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/)
19 |
20 | ## Docs
21 |
22 | Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/flatcar-linux/google-cloud/).
23 |
24 |
--------------------------------------------------------------------------------
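A cluster is defined by instantiating this module from a separate Terraform configuration. A minimal sketch, assuming the module is consumed from the Typhoon repository; the `ref` placeholder, module name `yavin`, and all values are illustrative, and the inputs correspond to `variables.tf` in this module:

```hcl
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes?ref=<release>"

  # Google Cloud
  cluster_name  = "yavin"
  region        = "us-central1"
  dns_zone      = "example.com"
  dns_zone_name = "example-zone"

  # configuration
  ssh_authorized_key = "ssh-ed25519 AAAA..."

  # optional
  worker_count = 2
}
```

--------------------------------------------------------------------------------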
/apiserver.tf:
--------------------------------------------------------------------------------
1 | # TCP Proxy load balancer DNS record
2 | resource "google_dns_record_set" "apiserver" {
3 | # DNS Zone name where record should be created
4 | managed_zone = var.dns_zone_name
5 |
6 | # DNS record
7 | name = format("%s.%s.", var.cluster_name, var.dns_zone)
8 | type = "A"
9 | ttl = 300
10 |
11 | # IPv4 address of apiserver TCP Proxy load balancer
12 | rrdatas = [google_compute_global_address.apiserver-ipv4.address]
13 | }
14 |
15 | # Static IPv4 address for the TCP Proxy Load Balancer
16 | resource "google_compute_global_address" "apiserver-ipv4" {
17 | name = "${var.cluster_name}-apiserver-ip"
18 | ip_version = "IPV4"
19 | }
20 |
21 | # Forward IPv4 TCP traffic to the TCP proxy load balancer
22 | resource "google_compute_global_forwarding_rule" "apiserver" {
23 | name = "${var.cluster_name}-apiserver"
24 | ip_address = google_compute_global_address.apiserver-ipv4.address
25 | ip_protocol = "TCP"
26 | port_range = "443"
27 | target = google_compute_target_tcp_proxy.apiserver.self_link
28 | }
29 |
30 | # Global TCP Proxy Load Balancer for apiservers
31 | resource "google_compute_target_tcp_proxy" "apiserver" {
32 | name = "${var.cluster_name}-apiserver"
33 | description = "Distribute TCP load across ${var.cluster_name} controllers"
34 | backend_service = google_compute_backend_service.apiserver.self_link
35 | }
36 |
37 | # Global backend service backed by unmanaged instance groups
38 | resource "google_compute_backend_service" "apiserver" {
39 | name = "${var.cluster_name}-apiserver"
40 | description = "${var.cluster_name} apiserver service"
41 |
42 | protocol = "TCP"
43 | port_name = "apiserver"
44 | session_affinity = "NONE"
45 | timeout_sec = "300"
46 |
47 | # controller(s) spread across zonal instance groups
48 | dynamic "backend" {
49 | for_each = google_compute_instance_group.controllers
50 | content {
51 | group = backend.value.self_link
52 | }
53 | }
54 |
55 | health_checks = [google_compute_health_check.apiserver.self_link]
56 | }
57 |
58 | # Instance group of heterogeneous (unmanaged) controller instances
59 | resource "google_compute_instance_group" "controllers" {
60 | count = min(var.controller_count, length(local.zones))
61 |
62 | name = format("%s-controllers-%s", var.cluster_name, element(local.zones, count.index))
63 | zone = element(local.zones, count.index)
64 |
65 | named_port {
66 | name = "apiserver"
67 | port = "6443"
68 | }
69 |
70 | # add instances in the zone into the instance group
71 | instances = matchkeys(
72 | google_compute_instance.controllers.*.self_link,
73 | google_compute_instance.controllers.*.zone,
74 | [element(local.zones, count.index)],
75 | )
76 | }
77 |
78 | # Health check for kube-apiserver
79 | resource "google_compute_health_check" "apiserver" {
80 | name = "${var.cluster_name}-apiserver-health"
81 | description = "Health check for kube-apiserver"
82 |
83 | timeout_sec = 5
84 | check_interval_sec = 5
85 |
86 | healthy_threshold = 1
87 | unhealthy_threshold = 3
88 |
89 | ssl_health_check {
90 | port = "6443"
91 | }
92 | }
93 |
94 |
--------------------------------------------------------------------------------
/bootstrap.tf:
--------------------------------------------------------------------------------
1 | # Kubernetes assets (kubeconfig, manifests)
2 | module "bootstrap" {
3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=4c660afd871a5f87f6dd84f01504a0ed1bdcc94f"
4 |
5 | cluster_name = var.cluster_name
6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
7 | etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")]
8 | networking = var.networking
9 | network_mtu = 1440
10 | pod_cidr = var.pod_cidr
11 | service_cidr = var.service_cidr
12 | cluster_domain_suffix = var.cluster_domain_suffix
13 | enable_reporting = var.enable_reporting
14 | enable_aggregation = var.enable_aggregation
15 | daemonset_tolerations = var.daemonset_tolerations
16 |
17 | // temporary
18 | external_apiserver_port = 443
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/butane/controller.yaml:
--------------------------------------------------------------------------------
1 | variant: flatcar
2 | version: 1.0.0
3 | systemd:
4 | units:
5 | - name: etcd-member.service
6 | enabled: true
7 | contents: |
8 | [Unit]
9 | Description=etcd (System Container)
10 | Documentation=https://github.com/etcd-io/etcd
11 | Requires=docker.service
12 | After=docker.service
13 | [Service]
14 | Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.6
15 | ExecStartPre=/usr/bin/docker run -d \
16 | --name etcd \
17 | --network host \
18 | --env-file /etc/etcd/etcd.env \
19 | --user 232:232 \
20 | --volume /etc/ssl/etcd:/etc/ssl/certs:ro \
21 | --volume /var/lib/etcd:/var/lib/etcd:rw \
22 | $${ETCD_IMAGE}
23 | ExecStart=docker logs -f etcd
24 | ExecStop=docker stop etcd
25 | ExecStopPost=docker rm etcd
26 | Restart=always
27 | RestartSec=10s
28 | TimeoutStartSec=0
29 | LimitNOFILE=40000
30 | [Install]
31 | WantedBy=multi-user.target
32 | - name: docker.service
33 | enabled: true
34 | - name: locksmithd.service
35 | mask: true
36 | - name: wait-for-dns.service
37 | enabled: true
38 | contents: |
39 | [Unit]
40 | Description=Wait for DNS entries
41 | Wants=systemd-resolved.service
42 | Before=kubelet.service
43 | [Service]
44 | Type=oneshot
45 | RemainAfterExit=true
46 | ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
47 | [Install]
48 | RequiredBy=kubelet.service
49 | RequiredBy=etcd-member.service
50 | - name: kubelet.service
51 | enabled: true
52 | contents: |
53 | [Unit]
54 | Description=Kubelet (System Container)
55 | Requires=docker.service
56 | After=docker.service
57 | Wants=rpc-statd.service
58 | [Service]
59 | Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.26.0
60 | ExecStartPre=/bin/mkdir -p /etc/cni/net.d
61 | ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
62 | ExecStartPre=/bin/mkdir -p /opt/cni/bin
63 | ExecStartPre=/bin/mkdir -p /var/lib/calico
64 | ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
65 | ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
66 | ExecStartPre=/usr/bin/docker run -d \
67 | --name kubelet \
68 | --privileged \
69 | --pid host \
70 | --network host \
71 | -v /etc/cni/net.d:/etc/cni/net.d:ro \
72 | -v /etc/kubernetes:/etc/kubernetes:ro \
73 | -v /etc/machine-id:/etc/machine-id:ro \
74 | -v /usr/lib/os-release:/etc/os-release:ro \
75 | -v /lib/modules:/lib/modules:ro \
76 | -v /run:/run \
77 | -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
78 | -v /var/lib/calico:/var/lib/calico:ro \
79 | -v /var/lib/containerd:/var/lib/containerd \
80 | -v /var/lib/kubelet:/var/lib/kubelet:rshared \
81 | -v /var/log:/var/log \
82 | -v /opt/cni/bin:/opt/cni/bin \
83 | $${KUBELET_IMAGE} \
84 | --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
85 | --config=/etc/kubernetes/kubelet.yaml \
86 | --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
87 | --kubeconfig=/var/lib/kubelet/kubeconfig \
88 | --node-labels=node.kubernetes.io/controller="true" \
89 | --register-with-taints=node-role.kubernetes.io/controller=:NoSchedule
90 | ExecStart=docker logs -f kubelet
91 | ExecStop=docker stop kubelet
92 | ExecStopPost=docker rm kubelet
93 | Restart=always
94 | RestartSec=10
95 | [Install]
96 | WantedBy=multi-user.target
97 | - name: bootstrap.service
98 | contents: |
99 | [Unit]
100 | Description=Kubernetes control plane
101 | Wants=docker.service
102 | After=docker.service
103 | ConditionPathExists=!/opt/bootstrap/bootstrap.done
104 | [Service]
105 | Type=oneshot
106 | RemainAfterExit=true
107 | WorkingDirectory=/opt/bootstrap
108 | Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.26.0
109 | ExecStart=/usr/bin/docker run \
110 | -v /etc/kubernetes/pki:/etc/kubernetes/pki:ro \
111 | -v /opt/bootstrap/assets:/assets:ro \
112 | -v /opt/bootstrap/apply:/apply:ro \
113 | --entrypoint=/apply \
114 | $${KUBELET_IMAGE}
115 | ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
116 | [Install]
117 | WantedBy=multi-user.target
118 | storage:
119 | directories:
120 | - path: /var/lib/etcd
121 | mode: 0700
122 | overwrite: true
123 | files:
124 | - path: /etc/kubernetes/kubeconfig
125 | mode: 0644
126 | contents:
127 | inline: |
128 | ${kubeconfig}
129 | - path: /etc/kubernetes/kubelet.yaml
130 | contents:
131 | inline: |
132 | apiVersion: kubelet.config.k8s.io/v1beta1
133 | kind: KubeletConfiguration
134 | authentication:
135 | anonymous:
136 | enabled: false
137 | webhook:
138 | enabled: true
139 | x509:
140 | clientCAFile: /etc/kubernetes/ca.crt
141 | authorization:
142 | mode: Webhook
143 | cgroupDriver: systemd
144 | clusterDNS:
145 | - ${cluster_dns_service_ip}
146 | clusterDomain: ${cluster_domain_suffix}
147 | healthzPort: 0
148 | rotateCertificates: true
149 | shutdownGracePeriod: 45s
150 | shutdownGracePeriodCriticalPods: 30s
151 | staticPodPath: /etc/kubernetes/manifests
152 | readOnlyPort: 0
153 | resolvConf: /run/systemd/resolve/resolv.conf
154 | volumePluginDir: /var/lib/kubelet/volumeplugins
155 | - path: /opt/bootstrap/layout
156 | mode: 0544
157 | contents:
158 | inline: |
159 | #!/bin/bash -e
160 | mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
161 | awk '/#####/ {filename=$2; next} {print > filename}' assets
162 | mkdir -p /etc/ssl/etcd/etcd
163 | mkdir -p /etc/kubernetes/pki
164 | mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
165 | mv tls/etcd/etcd-client* /etc/kubernetes/pki/
166 | chown -R etcd:etcd /etc/ssl/etcd
167 | chmod -R 500 /etc/ssl/etcd
168 | chmod -R 700 /var/lib/etcd
169 | mv auth/* /etc/kubernetes/pki/
170 | mv tls/k8s/* /etc/kubernetes/pki/
171 | mkdir -p /etc/kubernetes/manifests
172 | mv static-manifests/* /etc/kubernetes/manifests/
173 | mkdir -p /opt/bootstrap/assets
174 | mv manifests /opt/bootstrap/assets/manifests
175 | mv manifests-networking/* /opt/bootstrap/assets/manifests/
176 | rm -rf assets auth static-manifests tls manifests-networking
177 | - path: /opt/bootstrap/apply
178 | mode: 0544
179 | contents:
180 | inline: |
181 | #!/bin/bash -e
182 | export KUBECONFIG=/etc/kubernetes/pki/admin.conf
183 | until kubectl version; do
184 | echo "Waiting for static pod control plane"
185 | sleep 5
186 | done
187 | until kubectl apply -f /assets/manifests -R; do
188 | echo "Retry applying manifests"
189 | sleep 5
190 | done
191 | - path: /etc/systemd/logind.conf.d/inhibitors.conf
192 | contents:
193 | inline: |
194 | [Login]
195 | InhibitDelayMaxSec=45s
196 | - path: /etc/sysctl.d/max-user-watches.conf
197 | mode: 0644
198 | contents:
199 | inline: |
200 | fs.inotify.max_user_watches=16184
201 | - path: /etc/etcd/etcd.env
202 | mode: 0644
203 | contents:
204 | inline: |
205 | ETCD_NAME=${etcd_name}
206 | ETCD_DATA_DIR=/var/lib/etcd
207 | ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
208 | ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
209 | ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
210 | ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
211 | ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
212 | ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
213 | ETCD_STRICT_RECONFIG_CHECK=true
214 | ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
215 | ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
216 | ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
217 | ETCD_CLIENT_CERT_AUTH=true
218 | ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
219 | ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
220 | ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
221 | ETCD_PEER_CLIENT_CERT_AUTH=true
222 | passwd:
223 | users:
224 | - name: core
225 | ssh_authorized_keys:
226 | - "${ssh_authorized_key}"
227 |
--------------------------------------------------------------------------------
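The `controller_snippets` and `worker_snippets` variables accept extra Butane configs that `ct_config` merges into the rendered config above. A minimal sketch of passing a snippet from the calling configuration, extending the usage example shown after the README; the snippet content and module name are illustrative:

```hcl
module "yavin" {
  # ... required inputs as in the usage example above ...

  controller_snippets = [
    <<-EOF
    variant: flatcar
    version: 1.0.0
    storage:
      files:
        - path: /etc/motd
          mode: 0644
          contents:
            inline: |
              Welcome to a Typhoon controller node.
    EOF
  ]
}
```

--------------------------------------------------------------------------------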
/controllers.tf:
--------------------------------------------------------------------------------
1 | # Discrete DNS records for each controller's private IPv4 for etcd usage
2 | resource "google_dns_record_set" "etcds" {
3 | count = var.controller_count
4 |
5 | # DNS Zone name where record should be created
6 | managed_zone = var.dns_zone_name
7 |
8 | # DNS record
9 | name = format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone)
10 | type = "A"
11 | ttl = 300
12 |
13 | # private IPv4 address for etcd
14 | rrdatas = [google_compute_instance.controllers.*.network_interface.0.network_ip[count.index]]
15 | }
16 |
17 | # Zones in the region
18 | data "google_compute_zones" "all" {
19 | region = var.region
20 | }
21 |
22 | locals {
23 | zones = data.google_compute_zones.all.names
24 |
25 | controllers_ipv4_public = google_compute_instance.controllers.*.network_interface.0.access_config.0.nat_ip
26 | }
27 |
28 | # Controller instances
29 | resource "google_compute_instance" "controllers" {
30 | count = var.controller_count
31 |
32 | name = "${var.cluster_name}-controller-${count.index}"
33 | # use a zone in the region and wrap around (e.g. controllers > zones)
34 | zone = element(local.zones, count.index)
35 | machine_type = var.controller_type
36 |
37 | metadata = {
38 | user-data = data.ct_config.controllers.*.rendered[count.index]
39 | }
40 |
41 | boot_disk {
42 | auto_delete = true
43 |
44 | initialize_params {
45 | image = data.google_compute_image.flatcar-linux.self_link
46 | size = var.disk_size
47 | }
48 | }
49 |
50 | network_interface {
51 | network = google_compute_network.network.name
52 |
53 | # Ephemeral external IP
54 | access_config {
55 | }
56 | }
57 |
58 | can_ip_forward = true
59 | tags = ["${var.cluster_name}-controller"]
60 |
61 | lifecycle {
62 | ignore_changes = [
63 | metadata,
64 | boot_disk[0].initialize_params
65 | ]
66 | }
67 | }
68 |
69 | # Flatcar Linux controllers
70 | data "ct_config" "controllers" {
71 | count = var.controller_count
72 | content = templatefile("${path.module}/butane/controller.yaml", {
73 | # Cannot use cyclic dependencies on controllers or their DNS records
74 | etcd_name = "etcd${count.index}"
75 | etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
76 | # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
77 | etcd_initial_cluster = join(",", [
78 | for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380"
79 | ])
80 | kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet)
81 | ssh_authorized_key = var.ssh_authorized_key
82 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
83 | cluster_domain_suffix = var.cluster_domain_suffix
84 | })
85 | strict = true
86 | snippets = var.controller_snippets
87 | }
88 |
--------------------------------------------------------------------------------
/image.tf:
--------------------------------------------------------------------------------
1 | # Flatcar Linux most recent image from channel
2 | data "google_compute_image" "flatcar-linux" {
3 | project = "kinvolk-public"
4 | family = var.os_image
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/ingress.tf:
--------------------------------------------------------------------------------
1 | # Static IPv4 address for Ingress Load Balancing
2 | resource "google_compute_global_address" "ingress-ipv4" {
3 | name = "${var.cluster_name}-ingress-ipv4"
4 | ip_version = "IPV4"
5 | }
6 |
7 | # Static IPv6 address for Ingress Load Balancing
8 | resource "google_compute_global_address" "ingress-ipv6" {
9 | name = "${var.cluster_name}-ingress-ipv6"
10 | ip_version = "IPV6"
11 | }
12 |
13 | # Forward IPv4 TCP traffic to the HTTP proxy load balancer
14 | # Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
15 | resource "google_compute_global_forwarding_rule" "ingress-http-ipv4" {
16 | name = "${var.cluster_name}-ingress-http-ipv4"
17 | ip_address = google_compute_global_address.ingress-ipv4.address
18 | ip_protocol = "TCP"
19 | port_range = "80"
20 | target = google_compute_target_http_proxy.ingress-http.self_link
21 | }
22 |
23 | # Forward IPv4 TCP traffic to the TCP proxy load balancer
24 | resource "google_compute_global_forwarding_rule" "ingress-https-ipv4" {
25 | name = "${var.cluster_name}-ingress-https-ipv4"
26 | ip_address = google_compute_global_address.ingress-ipv4.address
27 | ip_protocol = "TCP"
28 | port_range = "443"
29 | target = google_compute_target_tcp_proxy.ingress-https.self_link
30 | }
31 |
32 | # Forward IPv6 TCP traffic to the HTTP proxy load balancer
33 | # Google Cloud does not allow TCP proxies for port 80. Must use HTTP proxy.
34 | resource "google_compute_global_forwarding_rule" "ingress-http-ipv6" {
35 | name = "${var.cluster_name}-ingress-http-ipv6"
36 | ip_address = google_compute_global_address.ingress-ipv6.address
37 | ip_protocol = "TCP"
38 | port_range = "80"
39 | target = google_compute_target_http_proxy.ingress-http.self_link
40 | }
41 |
42 | # Forward IPv6 TCP traffic to the TCP proxy load balancer
43 | resource "google_compute_global_forwarding_rule" "ingress-https-ipv6" {
44 | name = "${var.cluster_name}-ingress-https-ipv6"
45 | ip_address = google_compute_global_address.ingress-ipv6.address
46 | ip_protocol = "TCP"
47 | port_range = "443"
48 | target = google_compute_target_tcp_proxy.ingress-https.self_link
49 | }
50 |
51 | # HTTP proxy load balancer for ingress controllers
52 | resource "google_compute_target_http_proxy" "ingress-http" {
53 | name = "${var.cluster_name}-ingress-http"
54 | description = "Distribute HTTP load across ${var.cluster_name} workers"
55 | url_map = google_compute_url_map.ingress-http.self_link
56 | }
57 |
58 | # TCP proxy load balancer for ingress controllers
59 | resource "google_compute_target_tcp_proxy" "ingress-https" {
60 | name = "${var.cluster_name}-ingress-https"
61 | description = "Distribute HTTPS load across ${var.cluster_name} workers"
62 | backend_service = google_compute_backend_service.ingress-https.self_link
63 | }
64 |
65 | # HTTP URL Map (required)
66 | resource "google_compute_url_map" "ingress-http" {
67 | name = "${var.cluster_name}-ingress-http"
68 |
69 | # Do not add host/path rules for applications here. Use Ingress resources.
70 | default_service = google_compute_backend_service.ingress-http.self_link
71 | }
72 |
73 | # Backend service backed by managed instance group of workers
74 | resource "google_compute_backend_service" "ingress-http" {
75 | name = "${var.cluster_name}-ingress-http"
76 | description = "${var.cluster_name} ingress service"
77 |
78 | protocol = "HTTP"
79 | port_name = "http"
80 | session_affinity = "NONE"
81 | timeout_sec = "60"
82 |
83 | backend {
84 | group = module.workers.instance_group
85 | }
86 |
87 | health_checks = [google_compute_health_check.ingress.self_link]
88 | }
89 |
90 | # Backend service backed by managed instance group of workers
91 | resource "google_compute_backend_service" "ingress-https" {
92 | name = "${var.cluster_name}-ingress-https"
93 | description = "${var.cluster_name} ingress service"
94 |
95 | protocol = "TCP"
96 | port_name = "https"
97 | session_affinity = "NONE"
98 | timeout_sec = "60"
99 |
100 | backend {
101 | group = module.workers.instance_group
102 | }
103 |
104 | health_checks = [google_compute_health_check.ingress.self_link]
105 | }
106 |
107 | # Ingress HTTP Health Check
108 | resource "google_compute_health_check" "ingress" {
109 | name = "${var.cluster_name}-ingress-health"
110 | description = "Health check for Ingress controller"
111 |
112 | timeout_sec = 5
113 | check_interval_sec = 5
114 |
115 | healthy_threshold = 2
116 | unhealthy_threshold = 4
117 |
118 | http_health_check {
119 | port = 10254
120 | request_path = "/healthz"
121 | }
122 | }
123 |
124 |
--------------------------------------------------------------------------------
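Applications served through the in-cluster Ingress controller are reached via the static addresses above. A sketch of a DNS record for an app, assuming the cluster module is named `yavin`; the zone and hostname are illustrative:

```hcl
resource "google_dns_record_set" "app" {
  managed_zone = "example-zone"
  name         = "app.example.com."
  type         = "A"
  ttl          = 300
  rrdatas      = [module.yavin.ingress_static_ipv4]
}
```

--------------------------------------------------------------------------------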
/network.tf:
--------------------------------------------------------------------------------
1 | resource "google_compute_network" "network" {
2 | name = var.cluster_name
3 | description = "Network for the ${var.cluster_name} cluster"
4 | auto_create_subnetworks = true
5 |
6 | timeouts {
7 | delete = "6m"
8 | }
9 | }
10 |
11 | resource "google_compute_firewall" "allow-ssh" {
12 | name = "${var.cluster_name}-allow-ssh"
13 | network = google_compute_network.network.name
14 |
15 | allow {
16 | protocol = "tcp"
17 | ports = [22]
18 | }
19 |
20 | source_ranges = ["0.0.0.0/0"]
21 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
22 | }
23 |
24 | resource "google_compute_firewall" "internal-etcd" {
25 | name = "${var.cluster_name}-internal-etcd"
26 | network = google_compute_network.network.name
27 |
28 | allow {
29 | protocol = "tcp"
30 | ports = [2379, 2380]
31 | }
32 |
33 | source_tags = ["${var.cluster_name}-controller"]
34 | target_tags = ["${var.cluster_name}-controller"]
35 | }
36 |
37 | # Allow Prometheus to scrape etcd metrics
38 | resource "google_compute_firewall" "internal-etcd-metrics" {
39 | name = "${var.cluster_name}-internal-etcd-metrics"
40 | network = google_compute_network.network.name
41 |
42 | allow {
43 | protocol = "tcp"
44 | ports = [2381]
45 | }
46 |
47 | source_tags = ["${var.cluster_name}-worker"]
48 | target_tags = ["${var.cluster_name}-controller"]
49 | }
50 |
51 | # Allow Prometheus to scrape kube-scheduler and kube-controller-manager metrics
52 | resource "google_compute_firewall" "internal-kube-metrics" {
53 | name = "${var.cluster_name}-internal-kube-metrics"
54 | network = google_compute_network.network.name
55 |
56 | allow {
57 | protocol = "tcp"
58 | ports = [10257, 10259]
59 | }
60 |
61 | source_tags = ["${var.cluster_name}-worker"]
62 | target_tags = ["${var.cluster_name}-controller"]
63 | }
64 |
65 | resource "google_compute_firewall" "allow-apiserver" {
66 | name = "${var.cluster_name}-allow-apiserver"
67 | network = google_compute_network.network.name
68 |
69 | allow {
70 | protocol = "tcp"
71 | ports = [6443]
72 | }
73 |
74 | source_ranges = ["0.0.0.0/0"]
75 | target_tags = ["${var.cluster_name}-controller"]
76 | }
77 |
78 | # BGP and IPIP
79 | # https://docs.projectcalico.org/latest/reference/public-cloud/gce
80 | resource "google_compute_firewall" "internal-bgp" {
81 | count = var.networking != "flannel" ? 1 : 0
82 |
83 | name = "${var.cluster_name}-internal-bgp"
84 | network = google_compute_network.network.name
85 |
86 | allow {
87 | protocol = "tcp"
88 | ports = ["179"]
89 | }
90 |
91 | allow {
92 | protocol = "ipip"
93 | }
94 |
95 | source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
96 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
97 | }
98 |
99 | # flannel VXLAN
100 | resource "google_compute_firewall" "internal-vxlan" {
101 | count = var.networking == "flannel" ? 1 : 0
102 |
103 | name = "${var.cluster_name}-internal-vxlan"
104 | network = google_compute_network.network.name
105 |
106 | allow {
107 | protocol = "udp"
108 | ports = [4789]
109 | }
110 |
111 | source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
112 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
113 | }
114 |
115 | # Cilium VXLAN
116 | resource "google_compute_firewall" "internal-linux-vxlan" {
117 | count = var.networking == "cilium" ? 1 : 0
118 |
119 | name = "${var.cluster_name}-linux-vxlan"
120 | network = google_compute_network.network.name
121 |
122 | allow {
123 | protocol = "udp"
124 | ports = [8472]
125 | }
126 |
127 | # Cilium health
128 | allow {
129 | protocol = "icmp"
130 | }
131 |
132 | allow {
133 | protocol = "tcp"
134 | ports = [4240]
135 | }
136 |
137 | source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
138 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
139 | }
140 |
141 | # Allow Prometheus to scrape node-exporter daemonset
142 | resource "google_compute_firewall" "internal-node-exporter" {
143 | name = "${var.cluster_name}-internal-node-exporter"
144 | network = google_compute_network.network.name
145 |
146 | allow {
147 | protocol = "tcp"
148 | ports = [9100]
149 | }
150 |
151 | source_tags = ["${var.cluster_name}-worker"]
152 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
153 | }
154 |
155 | # Allow Prometheus to scrape kube-proxy metrics
156 | resource "google_compute_firewall" "internal-kube-proxy" {
157 | name = "${var.cluster_name}-internal-kube-proxy"
158 | network = google_compute_network.network.name
159 |
160 | allow {
161 | protocol = "tcp"
162 | ports = [10249]
163 | }
164 |
165 | source_tags = ["${var.cluster_name}-worker"]
166 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
167 | }
168 |
169 | # Allow apiserver to access kubelets for exec, log, port-forward
170 | resource "google_compute_firewall" "internal-kubelet" {
171 | name = "${var.cluster_name}-internal-kubelet"
172 | network = google_compute_network.network.name
173 |
174 | allow {
175 | protocol = "tcp"
176 | ports = [10250]
177 | }
178 |
179 | # allow Prometheus to scrape kubelet metrics too
180 | source_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
181 | target_tags = ["${var.cluster_name}-controller", "${var.cluster_name}-worker"]
182 | }
183 |
184 | # Workers
185 |
186 | resource "google_compute_firewall" "allow-ingress" {
187 | name = "${var.cluster_name}-allow-ingress"
188 | network = google_compute_network.network.name
189 |
190 | allow {
191 | protocol = "tcp"
192 | ports = [80, 443]
193 | }
194 |
195 | source_ranges = ["0.0.0.0/0"]
196 | target_tags = ["${var.cluster_name}-worker"]
197 | }
198 |
199 | resource "google_compute_firewall" "google-worker-health-checks" {
200 | name = "${var.cluster_name}-worker-health"
201 | network = google_compute_network.network.name
202 |
203 | allow {
204 | protocol = "tcp"
205 | ports = [10256]
206 | }
207 |
208 | # https://cloud.google.com/compute/docs/instance-groups/autohealing-instances-in-migs
209 | source_ranges = [
210 | "35.191.0.0/16",
211 | "130.211.0.0/22",
212 | ]
213 |
214 | target_tags = ["${var.cluster_name}-worker"]
215 | }
216 |
217 | resource "google_compute_firewall" "google-ingress-health-checks" {
218 | name = "${var.cluster_name}-ingress-health"
219 | network = google_compute_network.network.name
220 |
221 | allow {
222 | protocol = "tcp"
223 | ports = [10254]
224 | }
225 |
226 | # https://cloud.google.com/load-balancing/docs/health-check-concepts#method
227 | source_ranges = [
228 | "35.191.0.0/16",
229 | "130.211.0.0/22",
230 | "35.191.0.0/16",
231 | "209.85.152.0/22",
232 | "209.85.204.0/22",
233 | ]
234 |
235 | target_tags = ["${var.cluster_name}-worker"]
236 | }
237 |
238 |
--------------------------------------------------------------------------------
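Clusters expose `network_name` and `network_self_link` (see outputs.tf) so callers can attach additional firewall rules to the same network. A sketch, assuming the cluster module is named `yavin`; the rule name, port, and ranges are illustrative:

```hcl
resource "google_compute_firewall" "allow-custom" {
  name    = "yavin-allow-custom"
  network = module.yavin.network_name

  allow {
    protocol = "tcp"
    ports    = [9999]
  }

  source_ranges = ["10.0.0.0/8"]
  target_tags   = ["yavin-worker"]
}
```

--------------------------------------------------------------------------------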
/outputs.tf:
--------------------------------------------------------------------------------
1 | output "kubeconfig-admin" {
2 | value = module.bootstrap.kubeconfig-admin
3 | sensitive = true
4 | }
5 |
6 | # Outputs for Kubernetes Ingress
7 |
8 | output "ingress_static_ipv4" {
9 | description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller"
10 | value = google_compute_global_address.ingress-ipv4.address
11 | }
12 |
13 | output "ingress_static_ipv6" {
14 | description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller"
15 | value = google_compute_global_address.ingress-ipv6.address
16 | }
17 |
18 | # Outputs for worker pools
19 |
20 | output "network_name" {
21 | value = google_compute_network.network.name
22 | }
23 |
24 | output "kubeconfig" {
25 | value = module.bootstrap.kubeconfig-kubelet
26 | sensitive = true
27 | }
28 |
29 | # Outputs for custom firewalling
30 |
31 | output "network_self_link" {
32 | value = google_compute_network.network.self_link
33 | }
34 |
35 | # Outputs for custom load balancing
36 |
37 | output "worker_instance_group" {
38 | description = "Worker managed instance group full URL"
39 | value = module.workers.instance_group
40 | }
41 |
42 | output "worker_target_pool" {
43 | description = "Worker target pool self link"
44 | value = module.workers.target_pool
45 | }
46 |
47 | # Outputs for debug
48 |
49 | output "assets_dist" {
50 | value = module.bootstrap.assets_dist
51 | sensitive = true
52 | }
53 |
54 |
--------------------------------------------------------------------------------
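The worker pool outputs let additional pools of workers be attached to an existing cluster by instantiating the `workers/` submodule. A sketch, assuming the cluster module is named `yavin`; the `ref` placeholder, pool name, machine type, and count are illustrative, and the inputs correspond to `workers/variables.tf` below:

```hcl
module "yavin-worker-pool" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/flatcar-linux/kubernetes/workers?ref=<release>"

  # cluster
  cluster_name = "yavin"
  region       = "us-central1"
  network      = module.yavin.network_name
  kubeconfig   = module.yavin.kubeconfig

  # pool configuration
  name               = "yavin-pool"
  worker_count       = 2
  machine_type       = "n1-standard-16"
  ssh_authorized_key = var.ssh_authorized_key
}
```

--------------------------------------------------------------------------------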
/ssh.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | # format assets for distribution
3 | assets_bundle = [
4 | # header with the unpack location
5 | for key, value in module.bootstrap.assets_dist :
6 | format("##### %s\n%s", key, value)
7 | ]
8 | }
9 |
10 | # Secure copy assets to controllers.
11 | resource "null_resource" "copy-controller-secrets" {
12 | count = var.controller_count
13 |
14 | depends_on = [
15 | module.bootstrap,
16 | ]
17 |
18 | connection {
19 | type = "ssh"
20 | host = local.controllers_ipv4_public[count.index]
21 | user = "core"
22 | timeout = "15m"
23 | }
24 |
25 | provisioner "file" {
26 | content = join("\n", local.assets_bundle)
27 | destination = "/home/core/assets"
28 | }
29 |
30 | provisioner "remote-exec" {
31 | inline = [
32 | "sudo /opt/bootstrap/layout",
33 | ]
34 | }
35 | }
36 |
37 | # Connect to a controller to perform one-time cluster bootstrap.
38 | resource "null_resource" "bootstrap" {
39 | depends_on = [
40 | null_resource.copy-controller-secrets,
41 | module.workers,
42 | google_dns_record_set.apiserver,
43 | ]
44 |
45 | connection {
46 | type = "ssh"
47 | host = local.controllers_ipv4_public[0]
48 | user = "core"
49 | timeout = "15m"
50 | }
51 |
52 | provisioner "remote-exec" {
53 | inline = [
54 | "sudo systemctl start bootstrap",
55 | ]
56 | }
57 | }
58 |
59 |
--------------------------------------------------------------------------------
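Once `bootstrap.service` completes, day-to-day cluster access goes through the `kubeconfig-admin` output rather than SSH. A sketch of writing it to a local file with the `local` provider; the module name and file path are illustrative:

```hcl
resource "local_file" "kubeconfig-yavin" {
  content  = module.yavin.kubeconfig-admin
  filename = "/home/user/.kube/configs/yavin-config"
}
```

--------------------------------------------------------------------------------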
/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_name" {
2 | type = string
3 | description = "Unique cluster name (prepended to dns_zone)"
4 | }
5 |
6 | # Google Cloud
7 |
8 | variable "region" {
9 | type = string
10 | description = "Google Cloud Region (e.g. us-central1, see `gcloud compute regions list`)"
11 | }
12 |
13 | variable "dns_zone" {
14 | type = string
15 | description = "Google Cloud DNS Zone (e.g. google-cloud.example.com)"
16 | }
17 |
18 | variable "dns_zone_name" {
19 | type = string
20 | description = "Google Cloud DNS Zone name (e.g. example-zone)"
21 | }
22 |
23 | # instances
24 |
25 | variable "controller_count" {
26 | type = number
27 | description = "Number of controllers (i.e. masters)"
28 | default = 1
29 | }
30 |
31 | variable "worker_count" {
32 | type = number
33 | description = "Number of workers"
34 | default = 1
35 | }
36 |
37 | variable "controller_type" {
38 | type = string
39 | description = "Machine type for controllers (see `gcloud compute machine-types list`)"
40 | default = "n1-standard-1"
41 | }
42 |
43 | variable "worker_type" {
44 | type = string
45 | description = "Machine type for controllers (see `gcloud compute machine-types list`)"
46 | default = "n1-standard-1"
47 | }
48 |
49 | variable "os_image" {
50 | type = string
51 | description = "Flatcar Linux image for compute instances (flatcar-stable, flatcar-beta, flatcar-alpha)"
52 | default = "flatcar-stable"
53 |
54 | validation {
55 | condition = contains(["flatcar-stable", "flatcar-beta", "flatcar-alpha"], var.os_image)
56 | error_message = "The os_image must be flatcar-stable, flatcar-beta, or flatcar-alpha."
57 | }
58 | }
59 |
60 | variable "disk_size" {
61 | type = number
62 | description = "Size of the disk in GB"
63 | default = 30
64 | }
65 |
66 | variable "worker_preemptible" {
67 | type = bool
68 | description = "If enabled, Compute Engine will terminate workers randomly within 24 hours"
69 | default = false
70 | }
71 |
72 | variable "controller_snippets" {
73 | type = list(string)
74 | description = "Controller Container Linux Config snippets"
75 | default = []
76 | }
77 |
78 | variable "worker_snippets" {
79 | type = list(string)
80 | description = "Worker Container Linux Config snippets"
81 | default = []
82 | }
83 |
84 | # configuration
85 |
86 | variable "ssh_authorized_key" {
87 | type = string
88 | description = "SSH public key for user 'core'"
89 | }
90 |
91 | variable "networking" {
92 | type = string
93 | description = "Choice of networking provider (flannel, calico, or cilium)"
94 | default = "cilium"
95 | }
96 |
97 | variable "pod_cidr" {
98 | type = string
99 | description = "CIDR IPv4 range to assign Kubernetes pods"
100 | default = "10.2.0.0/16"
101 | }
102 |
103 | variable "service_cidr" {
104 | type = string
105 | description = < /dev/null; do sleep 1; done'
20 | [Install]
21 | RequiredBy=kubelet.service
22 | - name: kubelet.service
23 | enabled: true
24 | contents: |
25 | [Unit]
26 | Description=Kubelet
27 | Requires=docker.service
28 | After=docker.service
29 | Wants=rpc-statd.service
30 | [Service]
31 | Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.26.0
32 | ExecStartPre=/bin/mkdir -p /etc/cni/net.d
33 | ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
34 | ExecStartPre=/bin/mkdir -p /opt/cni/bin
35 | ExecStartPre=/bin/mkdir -p /var/lib/calico
36 | ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
37 | ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
38 | # Podman, rkt, or runc run container processes, whereas docker run
39 | # is a client to a daemon and requires workarounds to use within a
40 | # systemd unit. https://github.com/moby/moby/issues/6791
41 | ExecStartPre=/usr/bin/docker run -d \
42 | --name kubelet \
43 | --privileged \
44 | --pid host \
45 | --network host \
46 | -v /etc/cni/net.d:/etc/cni/net.d:ro \
47 | -v /etc/kubernetes:/etc/kubernetes:ro \
48 | -v /etc/machine-id:/etc/machine-id:ro \
49 | -v /usr/lib/os-release:/etc/os-release:ro \
50 | -v /lib/modules:/lib/modules:ro \
51 | -v /run:/run \
52 | -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
53 | -v /var/lib/calico:/var/lib/calico:ro \
54 | -v /var/lib/containerd:/var/lib/containerd \
55 | -v /var/lib/kubelet:/var/lib/kubelet:rshared \
56 | -v /var/log:/var/log \
57 | -v /opt/cni/bin:/opt/cni/bin \
58 | $${KUBELET_IMAGE} \
59 | --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
60 | --config=/etc/kubernetes/kubelet.yaml \
61 | --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
62 | --kubeconfig=/var/lib/kubelet/kubeconfig \
63 | %{~ for label in split(",", node_labels) ~}
64 | --node-labels=${label} \
65 | %{~ endfor ~}
66 | %{~ for taint in split(",", node_taints) ~}
67 | --register-with-taints=${taint} \
68 | %{~ endfor ~}
69 | --node-labels=node.kubernetes.io/node
70 | ExecStart=docker logs -f kubelet
71 | ExecStop=docker stop kubelet
72 | ExecStopPost=docker rm kubelet
73 | Restart=always
74 | RestartSec=5
75 | [Install]
76 | WantedBy=multi-user.target
77 | storage:
78 | files:
79 | - path: /etc/kubernetes/kubeconfig
80 | mode: 0644
81 | contents:
82 | inline: |
83 | ${kubeconfig}
84 | - path: /etc/kubernetes/kubelet.yaml
85 | contents:
86 | inline: |
87 | apiVersion: kubelet.config.k8s.io/v1beta1
88 | kind: KubeletConfiguration
89 | authentication:
90 | anonymous:
91 | enabled: false
92 | webhook:
93 | enabled: true
94 | x509:
95 | clientCAFile: /etc/kubernetes/ca.crt
96 | authorization:
97 | mode: Webhook
98 | cgroupDriver: systemd
99 | clusterDNS:
100 | - ${cluster_dns_service_ip}
101 | clusterDomain: ${cluster_domain_suffix}
102 | healthzPort: 0
103 | rotateCertificates: true
104 | shutdownGracePeriod: 45s
105 | shutdownGracePeriodCriticalPods: 30s
106 | staticPodPath: /etc/kubernetes/manifests
107 | readOnlyPort: 0
108 | resolvConf: /run/systemd/resolve/resolv.conf
109 | volumePluginDir: /var/lib/kubelet/volumeplugins
110 | - path: /etc/systemd/logind.conf.d/inhibitors.conf
111 | contents:
112 | inline: |
113 | [Login]
114 | InhibitDelayMaxSec=45s
115 | - path: /etc/sysctl.d/max-user-watches.conf
116 | mode: 0644
117 | contents:
118 | inline: |
119 | fs.inotify.max_user_watches=16184
120 | passwd:
121 | users:
122 | - name: core
123 | ssh_authorized_keys:
124 | - "${ssh_authorized_key}"
125 |
--------------------------------------------------------------------------------
/workers/image.tf:
--------------------------------------------------------------------------------
1 | # Flatcar Linux most recent image from channel
2 | data "google_compute_image" "flatcar-linux" {
3 | project = "kinvolk-public"
4 | family = var.os_image
5 | }
6 |
--------------------------------------------------------------------------------
/workers/outputs.tf:
--------------------------------------------------------------------------------
1 | # Outputs for global load balancing
2 |
3 | output "instance_group" {
4 | description = "Worker managed instance group full URL"
5 | value = google_compute_region_instance_group_manager.workers.instance_group
6 | }
7 |
8 | # Outputs for regional load balancing
9 |
10 | output "target_pool" {
11 | description = "Worker target pool self link"
12 | value = google_compute_target_pool.workers.self_link
13 | }
14 |
15 |
--------------------------------------------------------------------------------
/workers/target_pool.tf:
--------------------------------------------------------------------------------
1 | # Target pool for TCP/UDP load balancing
2 | resource "google_compute_target_pool" "workers" {
3 | name = "${var.name}-worker-pool"
4 | region = var.region
5 | session_affinity = "NONE"
6 |
7 | health_checks = [
8 | google_compute_http_health_check.workers.name,
9 | ]
10 | }
11 |
12 | # HTTP Health Check (for TCP/UDP load balancing)
13 | # Forward rules (regional) to target pools don't support different external
14 | # and internal ports. Health check for nodes with Ingress controllers that
15 | # may support proxying or otherwise satisfy the check.
16 | resource "google_compute_http_health_check" "workers" {
17 | name = "${var.name}-target-pool-health"
18 | description = "Health check for the worker target pool"
19 |
20 | port = 10254
21 | request_path = "/healthz"
22 | }
23 |
24 |
--------------------------------------------------------------------------------
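The target pool and HTTP health check above support regional network load balancing, as an alternative to the global proxies in ingress.tf. The parent module exposes the pool as `worker_target_pool`, so a caller can forward custom TCP traffic to workers. A sketch, assuming the cluster module is named `yavin`; the resource names, region, and port are illustrative:

```hcl
resource "google_compute_address" "app-ipv4" {
  name = "app-ipv4"
}

resource "google_compute_forwarding_rule" "app" {
  name        = "app"
  region      = "us-central1"
  ip_address  = google_compute_address.app-ipv4.address
  ip_protocol = "TCP"
  port_range  = "3333"
  target      = module.yavin.worker_target_pool
}
```

--------------------------------------------------------------------------------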
/workers/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | type = string
3 | description = "Unique name for the worker pool"
4 | }
5 |
6 | variable "cluster_name" {
7 | type = string
8 | description = "Must be set to `cluster_name of cluster`"
9 | }
10 |
11 | # Google Cloud
12 |
13 | variable "region" {
14 | type = string
15 | description = "Must be set to `region` of cluster"
16 | }
17 |
18 | variable "network" {
19 | type = string
20 | description = "Must be set to `network_name` output by cluster"
21 | }
22 |
23 | # instances
24 |
25 | variable "worker_count" {
26 | type = number
27 | description = "Number of worker compute instances the instance group should manage"
28 | default = 1
29 | }
30 |
31 | variable "machine_type" {
32 | type = string
33 | description = "Machine type for compute instances (e.g. gcloud compute machine-types list)"
34 | default = "n1-standard-1"
35 | }
36 |
37 | variable "os_image" {
38 | type = string
39 | description = "Flatcar Linux image for compute instances (flatcar-stable, flatcar-beta, flatcar-alpha)"
40 | default = "flatcar-stable"
41 |
42 | validation {
43 | condition = contains(["flatcar-stable", "flatcar-beta", "flatcar-alpha"], var.os_image)
44 | error_message = "The os_image must be flatcar-stable, flatcar-beta, or flatcar-alpha."
45 | }
46 | }
47 |
48 | variable "disk_size" {
49 | type = number
50 | description = "Size of the disk in GB"
51 | default = 30
52 | }
53 |
54 | variable "preemptible" {
55 | type = bool
56 | description = "If enabled, Compute Engine will terminate instances randomly within 24 hours"
57 | default = false
58 | }
59 |
60 | variable "snippets" {
61 | type = list(string)
62 | description = "Container Linux Config snippets"
63 | default = []
64 | }
65 |
66 | # configuration
67 |
68 | variable "kubeconfig" {
69 | type = string
70 | description = "Must be set to `kubeconfig` output by cluster"
71 | }
72 |
73 | variable "ssh_authorized_key" {
74 | type = string
75 | description = "SSH public key for user 'core'"
76 | }
77 |
78 | variable "service_cidr" {
79 | type = string
80 | description = <