├── .gitignore ├── resources ├── coredns │ ├── service-account.yaml │ ├── cluster-role-binding.yaml │ ├── service.yaml │ ├── cluster-role.yaml │ ├── config.yaml │ └── deployment.yaml ├── flannel │ ├── service-account.yaml │ ├── cluster-role-binding.yaml │ ├── cluster-role.yaml │ ├── config.yaml │ └── daemonset.yaml ├── kube-proxy │ ├── kube-proxy-sa.yaml │ ├── kube-proxy-role-binding.yaml │ └── kube-proxy.yaml ├── cilium │ ├── service-account.yaml │ ├── cluster-role-binding.yaml │ ├── deployment.yaml │ ├── cluster-role.yaml │ ├── config.yaml │ └── daemonset.yaml ├── kubeconfig-bootstrap ├── manifests │ ├── in-cluster.yaml │ ├── kubelet-delete-cluster-role-binding.yaml │ ├── bootstrap-token.yaml │ ├── bootstrap-cluster-role-binding.yaml │ ├── bootstrap-renew-approve-cluster-role-binding.yaml │ ├── bootstrap-new-approve-cluster-role-binding.yaml │ ├── kubelet-delete-cluster-role.yaml │ └── kubeconfig-in-cluster.yaml ├── kubeconfig-admin └── static-manifests │ ├── kube-scheduler.yaml │ ├── kube-controller-manager.yaml │ └── kube-apiserver.yaml ├── .github ├── dependabot.yaml └── workflows │ └── test.yaml ├── terraform.tfvars.example ├── versions.tf ├── LICENSE ├── conditional.tf ├── README.md ├── outputs.tf ├── tls-aggregation.tf ├── auth.tf ├── manifests.tf ├── tls-etcd.tf ├── variables.tf └── tls-k8s.tf /.gitignore: -------------------------------------------------------------------------------- 1 | *.tfvars 2 | .terraform 3 | *.tfstate* 4 | assets 5 | -------------------------------------------------------------------------------- /resources/coredns/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /resources/flannel/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: flannel 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /resources/kube-proxy/kube-proxy-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: kube-system 5 | name: kube-proxy 6 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | -------------------------------------------------------------------------------- /terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | cluster_name = "example" 2 | api_servers = ["node1.example.com"] 3 | etcd_servers = ["node1.example.com"] 4 | networking = "flannel" 5 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | random = "~> 3.1" 7 | tls = "~> 4.0" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /resources/cilium/service-account.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cilium-operator 5 | namespace: kube-system 6 | 7 | --- 8 | apiVersion: v1 9 | kind: ServiceAccount 10 | metadata: 11 | name: cilium-agent 12 | namespace: kube-system 13 | 14 | -------------------------------------------------------------------------------- /resources/flannel/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: flannel 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: flannel 9 | subjects: 10 | - kind: ServiceAccount 11 | name: flannel 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /resources/kubeconfig-bootstrap: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: local 5 | cluster: 6 | server: ${server} 7 | certificate-authority-data: ${ca_cert} 8 | users: 9 | - name: kubelet 10 | user: 11 | token: ${token_id}.${token_secret} 12 | contexts: 13 | - context: 14 | cluster: local 15 | user: kubelet 16 | -------------------------------------------------------------------------------- /resources/manifests/in-cluster.yaml: -------------------------------------------------------------------------------- 1 | # in-cluster ConfigMap is for control plane components that must reach 2 | # kube-apiserver before service IPs are available (e.g. 10.3.0.1) 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: in-cluster 7 | namespace: kube-system 8 | data: 9 | apiserver-host: ${apiserver_host} 10 | apiserver-port: "${apiserver_port}" 11 | -------------------------------------------------------------------------------- /resources/manifests/kubelet-delete-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kubelet-delete 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kubelet-delete 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: system:nodes 13 | -------------------------------------------------------------------------------- /resources/kube-proxy/kube-proxy-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kube-proxy 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:node-proxier # Automatically created system role. 
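9 | # (system:node-proxier is a built-in role; it grants the Service and Endpoints read access kube-proxy needs to program each node's forwarding rules.)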
10 | subjects: 11 | - kind: ServiceAccount 12 | name: kube-proxy 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /resources/manifests/bootstrap-token.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: bootstrap.kubernetes.io/token 4 | metadata: 5 | # Name MUST be of form "bootstrap-token-<token id>" 6 | name: bootstrap-token-${token_id} 7 | namespace: kube-system 8 | stringData: 9 | description: "Typhoon generated bootstrap token" 10 | token-id: ${token_id} 11 | token-secret: ${token_secret} 12 | usage-bootstrap-authentication: "true" 13 | -------------------------------------------------------------------------------- /resources/kubeconfig-admin: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: ${name} 5 | cluster: 6 | server: ${server} 7 | certificate-authority-data: ${ca_cert} 8 | users: 9 | - name: ${name} 10 | user: 11 | client-certificate-data: ${kubelet_cert} 12 | client-key-data: ${kubelet_key} 13 | current-context: ${name} 14 | contexts: 15 | - name: ${name} 16 | context: 17 | cluster: ${name} 18 | user: ${name} 19 | -------------------------------------------------------------------------------- /resources/manifests/bootstrap-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | # Bind system:bootstrappers to ClusterRole for node bootstrap 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: bootstrap-node 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:node-bootstrapper 10 | subjects: 11 | - apiGroup: rbac.authorization.k8s.io 12 | kind: Group 13 | name: system:bootstrappers 14 | -------------------------------------------------------------------------------- /resources/flannel/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: flannel 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | verbs: 11 | - get 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - nodes 16 | verbs: 17 | - list 18 | - watch 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - nodes/status 23 | verbs: 24 | - patch 25 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: test 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | jobs: 8 | terraform: 9 | name: fmt 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: checkout 13 | uses: actions/checkout@v6 14 | 15 | - name: terraform 16 | uses: hashicorp/setup-terraform@v3 17 | with: 18 | terraform_version: 1.11.1 19 | 20 | - name: fmt 21 | run: terraform fmt -check -diff -recursive 22 | -------------------------------------------------------------------------------- /resources/coredns/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: system:coredns 5 | labels: 6 | kubernetes.io/bootstrapping: rbac-defaults 7 | annotations: 8 | rbac.authorization.kubernetes.io/autoupdate: "true" 9 | roleRef: 10 | apiGroup:
rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: system:coredns 13 | subjects: 14 | - kind: ServiceAccount 15 | name: coredns 16 | namespace: kube-system 17 | -------------------------------------------------------------------------------- /resources/manifests/bootstrap-renew-approve-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | # Approve renewal CSRs from "system:nodes" subjects 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: bootstrap-approve-renew 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 10 | subjects: 11 | - apiGroup: rbac.authorization.k8s.io 12 | kind: Group 13 | name: system:nodes 14 | -------------------------------------------------------------------------------- /resources/manifests/bootstrap-new-approve-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | # Approve new CSRs from "system:bootstrappers" subjects 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: bootstrap-approve-new 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:certificates.k8s.io:certificatesigningrequests:nodeclient 10 | subjects: 11 | - apiGroup: rbac.authorization.k8s.io 12 | kind: Group 13 | name: system:bootstrappers 14 | -------------------------------------------------------------------------------- /resources/coredns/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | annotations: 7 | prometheus.io/scrape: "true" 8 | prometheus.io/port: "9153" 9 | labels: 10 | k8s-app: coredns 11 | kubernetes.io/name: "CoreDNS" 12 | spec: 13 | selector: 14 | k8s-app: coredns 15 | clusterIP: ${cluster_dns_service_ip} 16 | ports: 17 | - name: dns 18 | port: 53 19 | protocol: UDP 20 | - name: dns-tcp 21 | port: 53 22 | protocol: TCP 23 | -------------------------------------------------------------------------------- /resources/manifests/kubelet-delete-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kubelet-delete 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - nodes 9 | verbs: 10 | - delete 11 | - apiGroups: ["apps"] 12 | resources: 13 | - deployments 14 | - daemonsets 15 | - statefulsets 16 | verbs: 17 | - get 18 | - list 19 | - apiGroups: [""] 20 | resources: 21 | - pods/eviction 22 | verbs: 23 | - create 24 | -------------------------------------------------------------------------------- /resources/coredns/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: system:coredns 5 | labels: 6 | kubernetes.io/bootstrapping: rbac-defaults 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | - endpoints 11 | - services 12 | - pods 13 | - namespaces 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: ["discovery.k8s.io"] 18 | resources: 19 | - endpointslices 20 | verbs: 21 | - list 22 | - watch 23 | - apiGroups: [""] 24 | resources: 25 | - nodes 26 | verbs: 27 | - get 28 | 
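29 | # These read-only rules match what the kubernetes plugin in the Corefile (config.yaml below) queries: Services, Endpoints, EndpointSlices, Pods, Namespaces, and Nodes.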
-------------------------------------------------------------------------------- /resources/coredns/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | data: 7 | Corefile: | 8 | .:53 { 9 | errors 10 | health { 11 | lameduck 5s 12 | } 13 | ready 14 | log . { 15 | class error 16 | } 17 | kubernetes ${cluster_domain_suffix} in-addr.arpa ip6.arpa { 18 | pods insecure 19 | fallthrough in-addr.arpa ip6.arpa 20 | } 21 | prometheus :9153 22 | forward . /etc/resolv.conf 23 | cache 30 24 | loop 25 | reload 26 | loadbalance 27 | } 28 | -------------------------------------------------------------------------------- /resources/cilium/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: cilium-operator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cilium-operator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: cilium-operator 12 | namespace: kube-system 13 | 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: cilium-agent 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: cilium-agent 23 | subjects: 24 | - kind: ServiceAccount 25 | name: cilium-agent 26 | namespace: kube-system 27 | 28 | -------------------------------------------------------------------------------- /resources/manifests/kubeconfig-in-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kubeconfig-in-cluster 5 | namespace: kube-system 6 | data: 7 | kubeconfig: | 8 | apiVersion: v1 9 | clusters: 10 | - name: local 11 | cluster: 12 | # kubeconfig-in-cluster is for control plane components that must reach 13 | # kube-apiserver before service IPs are available (e.g. 10.3.0.1) 14 | server: ${server} 15 | certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 16 | users: 17 | - name: service-account 18 | user: 19 | # Use service account token 20 | tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 21 | contexts: 22 | - context: 23 | cluster: local 24 | user: service-account 25 | -------------------------------------------------------------------------------- /resources/flannel/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: flannel-config 5 | namespace: kube-system 6 | labels: 7 | tier: node 8 | k8s-app: flannel 9 | data: 10 | cni-conf.json: | 11 | { 12 | "name": "cbr0", 13 | "cniVersion": "0.3.1", 14 | "plugins": [ 15 | { 16 | "type": "flannel", 17 | "delegate": { 18 | "hairpinMode": true, 19 | "isDefaultGateway": true 20 | } 21 | }, 22 | { 23 | "type": "portmap", 24 | "capabilities": { 25 | "portMappings": true 26 | } 27 | } 28 | ] 29 | } 30 | net-conf.json: | 31 | { 32 | "Network": "${pod_cidr}", 33 | "Backend": { 34 | "Type": "vxlan", 35 | "Port": 8472 36 | } 37 | }
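38 | # Note: the vxlan backend encapsulates pod-to-pod traffic on UDP port 8472, so that port must be allowed between nodes.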
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Dalton Hubble 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /resources/static-manifests/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-scheduler 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-scheduler 8 | tier: control-plane 9 | spec: 10 | hostNetwork: true 11 | priorityClassName: system-cluster-critical 12 | securityContext: 13 | runAsNonRoot: true 14 | runAsUser: 65534 15 | seccompProfile: 16 | type: RuntimeDefault 17 | containers: 18 | - name: kube-scheduler 19 | image: ${kube_scheduler_image} 20 | command: 21 | - kube-scheduler 22 | - --authentication-kubeconfig=/etc/kubernetes/pki/scheduler.conf 23 | - --authorization-kubeconfig=/etc/kubernetes/pki/scheduler.conf 24 | - --kubeconfig=/etc/kubernetes/pki/scheduler.conf 25 | - --leader-elect=true 26 | livenessProbe: 27 | httpGet: 28 | scheme: HTTPS 29 | host: 127.0.0.1 30 | path: /healthz 31 | port: 10259 32 | initialDelaySeconds: 15 33 | timeoutSeconds: 15 34 | resources: 35 | requests: 36 | cpu: 100m 37 | volumeMounts: 38 | - name: secrets 39 | mountPath: /etc/kubernetes/pki/scheduler.conf 40 | readOnly: true 41 | volumes: 42 | - name: secrets 43 | hostPath: 44 | path: /etc/kubernetes/pki/scheduler.conf
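45 | # Note: with --leader-elect=true, scheduler pods on multiple controllers coordinate through a lease so only one actively schedules at a time.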
-------------------------------------------------------------------------------- /conditional.tf: -------------------------------------------------------------------------------- 1 | # Assets generated only when certain options are chosen 2 | 3 | locals { 4 | # flannel manifests map 5 | # { manifests/network/manifest.yaml => content } 6 | flannel_manifests = { 7 | for name in fileset("${path.module}/resources/flannel", "*.yaml") : 8 | "manifests/network/${name}" => templatefile( 9 | "${path.module}/resources/flannel/${name}", 10 | { 11 | flannel_image = var.container_images["flannel"] 12 | flannel_cni_image = var.container_images["flannel_cni"] 13 | pod_cidr = var.pod_cidr 14 | daemonset_tolerations = var.daemonset_tolerations 15 | } 16 | ) 17 | if var.components.enable && var.components.flannel.enable && var.networking == "flannel" 18 | } 19 | 20 | # cilium manifests map 21 | # { manifests/network/manifest.yaml => content } 22 | cilium_manifests = { 23 | for name in fileset("${path.module}/resources/cilium", "**/*.yaml") : 24 | "manifests/network/${name}" => templatefile( 25 | "${path.module}/resources/cilium/${name}", 26 | { 27 | cilium_agent_image = var.container_images["cilium_agent"] 28 | cilium_operator_image = var.container_images["cilium_operator"] 29 | pod_cidr = var.pod_cidr 30 | daemonset_tolerations = var.daemonset_tolerations 31 | } 32 | ) 33 | if var.components.enable && var.components.cilium.enable && var.networking == "cilium" 34 | } 35 | } 36 | 37 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-render-bootstrap 2 | [![Workflow](https://github.com/poseidon/terraform-render-bootstrap/actions/workflows/test.yaml/badge.svg)](https://github.com/poseidon/terraform-render-bootstrap/actions/workflows/test.yaml?query=branch%3Amain) 3 | [![Sponsors](https://img.shields.io/github/sponsors/poseidon?logo=github)](https://github.com/sponsors/poseidon) 4 | [![Mastodon](https://img.shields.io/badge/follow-news-6364ff?logo=mastodon)](https://fosstodon.org/@typhoon) 5 | 6 | `terraform-render-bootstrap` is a Terraform module that renders TLS certificates, static pods, and manifests for bootstrapping a Kubernetes cluster. 7 | 8 | ## Audience 9 | 10 | `terraform-render-bootstrap` is a low-level component of the [Typhoon](https://github.com/poseidon/typhoon) Kubernetes distribution. Use Typhoon modules to create and manage Kubernetes clusters across supported platforms. Use the bootstrap module if you'd like to customize a Kubernetes control plane or build your own distribution. 11 | 12 | ## Usage 13 | 14 | Use the module to declare bootstrap assets. Check [variables.tf](variables.tf) for options and [terraform.tfvars.example](terraform.tfvars.example) for examples. 15 | 16 | ```hcl 17 | module "bootstrap" { 18 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=SHA" 19 | 20 | cluster_name = "example" 21 | api_servers = ["node1.example.com"] 22 | etcd_servers = ["node1.example.com"] 23 | } 24 | ``` 25 | 26 | Generate assets in Terraform state. 27 | 28 | ```sh 29 | terraform init 30 | terraform plan 31 | terraform apply 32 | ``` 33 | 34 | To inspect and write assets locally (e.g. for debugging), use the `assets_dist` Terraform output. 35 | 36 | ```hcl 37 | resource "local_file" "assets" { 38 | for_each = module.bootstrap.assets_dist 39 | filename = "some-assets/${each.key}" 40 | content = each.value 41 | } 42 | ```
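43 | 44 | Individual outputs can be consumed directly as well. Below is a minimal sketch that writes the generated admin kubeconfig to disk; it assumes the module block above and the `hashicorp/local` provider (not otherwise required by this module). 45 | 46 | ```hcl 47 | # Hypothetical example: persist the (sensitive) admin kubeconfig for kubectl 48 | resource "local_sensitive_file" "kubeconfig-admin" { 49 | filename = "some-assets/auth/kubeconfig" 50 | content = module.bootstrap.kubeconfig-admin 51 | } 52 | ``` 53 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "cluster_dns_service_ip" { 3 | value = cidrhost(var.service_cidr, 10) 4 | } 5 | 6 | // Generated kubeconfig for Kubelets (i.e. lower privilege than admin) 7 | output "kubeconfig-kubelet" { 8 | value = local.kubeconfig-bootstrap 9 | sensitive = true 10 | } 11 | 12 | // Generated kubeconfig for admins (i.e. 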
human super-user) 13 | output "kubeconfig-admin" { 14 | value = local.kubeconfig-admin 15 | sensitive = true 16 | } 17 | 18 | # assets to distribute to controllers 19 | # { some/path => content } 20 | output "assets_dist" { 21 | # combine maps of assets 22 | value = merge( 23 | local.auth_kubeconfigs, 24 | local.etcd_tls, 25 | local.kubernetes_tls, 26 | local.aggregation_tls, 27 | local.static_manifests, 28 | local.manifests, 29 | local.flannel_manifests, 30 | local.cilium_manifests, 31 | ) 32 | sensitive = true 33 | } 34 | 35 | # etcd TLS assets 36 | 37 | output "etcd_ca_cert" { 38 | value = tls_self_signed_cert.etcd-ca.cert_pem 39 | sensitive = true 40 | } 41 | 42 | output "etcd_client_cert" { 43 | value = tls_locally_signed_cert.client.cert_pem 44 | sensitive = true 45 | } 46 | 47 | output "etcd_client_key" { 48 | value = tls_private_key.client.private_key_pem 49 | sensitive = true 50 | } 51 | 52 | output "etcd_server_cert" { 53 | value = tls_locally_signed_cert.server.cert_pem 54 | sensitive = true 55 | } 56 | 57 | output "etcd_server_key" { 58 | value = tls_private_key.server.private_key_pem 59 | sensitive = true 60 | } 61 | 62 | output "etcd_peer_cert" { 63 | value = tls_locally_signed_cert.peer.cert_pem 64 | sensitive = true 65 | } 66 | 67 | output "etcd_peer_key" { 68 | value = tls_private_key.peer.private_key_pem 69 | sensitive = true 70 | } 71 | 72 | # Kubernetes TLS assets 73 | 74 | output "service_account_public_key" { 75 | value = tls_private_key.service-account.public_key_pem 76 | } 77 | -------------------------------------------------------------------------------- /tls-aggregation.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Kubernetes Aggregation TLS assets map 3 | aggregation_tls = var.enable_aggregation ? { 4 | "tls/k8s/aggregation-ca.crt" = tls_self_signed_cert.aggregation-ca[0].cert_pem, 5 | "tls/k8s/aggregation-client.crt" = tls_locally_signed_cert.aggregation-client[0].cert_pem, 6 | "tls/k8s/aggregation-client.key" = tls_private_key.aggregation-client[0].private_key_pem, 7 | } : {} 8 | } 9 | 10 | # Kubernetes Aggregation CA (i.e. front-proxy-ca) 11 | # Files: tls/{aggregation-ca.crt,aggregation-ca.key} 12 | 13 | resource "tls_private_key" "aggregation-ca" { 14 | count = var.enable_aggregation ? 1 : 0 15 | 16 | algorithm = "RSA" 17 | rsa_bits = "2048" 18 | } 19 | 20 | resource "tls_self_signed_cert" "aggregation-ca" { 21 | count = var.enable_aggregation ? 1 : 0 22 | 23 | private_key_pem = tls_private_key.aggregation-ca[0].private_key_pem 24 | 25 | subject { 26 | common_name = "kubernetes-front-proxy-ca" 27 | } 28 | 29 | is_ca_certificate = true 30 | validity_period_hours = 8760 31 | 32 | allowed_uses = [ 33 | "key_encipherment", 34 | "digital_signature", 35 | "cert_signing", 36 | ] 37 | } 38 | 39 | # Kubernetes apiserver (i.e. front-proxy-client) 40 | # Files: tls/{aggregation-client.crt,aggregation-client.key} 41 | 42 | resource "tls_private_key" "aggregation-client" { 43 | count = var.enable_aggregation ? 1 : 0 44 | 45 | algorithm = "RSA" 46 | rsa_bits = "2048" 47 | } 48 | 49 | resource "tls_cert_request" "aggregation-client" { 50 | count = var.enable_aggregation ? 1 : 0 51 | 52 | private_key_pem = tls_private_key.aggregation-client[0].private_key_pem 53 | 54 | subject { 55 | common_name = "kube-apiserver" 56 | } 57 | } 58 | 59 | resource "tls_locally_signed_cert" "aggregation-client" { 60 | count = var.enable_aggregation ? 
1 : 0 61 | 62 | cert_request_pem = tls_cert_request.aggregation-client[0].cert_request_pem 63 | 64 | ca_private_key_pem = tls_private_key.aggregation-ca[0].private_key_pem 65 | ca_cert_pem = tls_self_signed_cert.aggregation-ca[0].cert_pem 66 | 67 | validity_period_hours = 8760 68 | 69 | allowed_uses = [ 70 | "key_encipherment", 71 | "digital_signature", 72 | "client_auth", 73 | ] 74 | } 75 | 76 | -------------------------------------------------------------------------------- /resources/static-manifests/kube-controller-manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-controller-manager 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-controller-manager 8 | tier: control-plane 9 | spec: 10 | hostNetwork: true 11 | priorityClassName: system-cluster-critical 12 | securityContext: 13 | runAsNonRoot: true 14 | runAsUser: 65534 15 | seccompProfile: 16 | type: RuntimeDefault 17 | containers: 18 | - name: kube-controller-manager 19 | image: ${kube_controller_manager_image} 20 | command: 21 | - kube-controller-manager 22 | - --authentication-kubeconfig=/etc/kubernetes/pki/controller-manager.conf 23 | - --authorization-kubeconfig=/etc/kubernetes/pki/controller-manager.conf 24 | - --allocate-node-cidrs=true 25 | - --client-ca-file=/etc/kubernetes/pki/ca.crt 26 | - --cluster-cidr=${pod_cidr} 27 | - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt 28 | - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key 29 | - --cluster-signing-duration=72h 30 | - --controllers=*,tokencleaner 31 | - --configure-cloud-routes=false 32 | - --kubeconfig=/etc/kubernetes/pki/controller-manager.conf 33 | - --leader-elect=true 34 | - --root-ca-file=/etc/kubernetes/pki/ca.crt 35 | - --service-account-private-key-file=/etc/kubernetes/pki/service-account.key 36 | - --service-cluster-ip-range=${service_cidr} 37 | - --use-service-account-credentials=true 38 | livenessProbe: 39 | httpGet: 40 | scheme: HTTPS 41 | host: 127.0.0.1 42 | path: /healthz 43 | port: 10257 44 | initialDelaySeconds: 25 45 | timeoutSeconds: 15 46 | failureThreshold: 8 47 | resources: 48 | requests: 49 | cpu: 150m 50 | volumeMounts: 51 | - name: secrets 52 | mountPath: /etc/kubernetes/pki 53 | readOnly: true 54 | - name: etc-ssl 55 | mountPath: /etc/ssl/certs 56 | readOnly: true 57 | - name: etc-pki 58 | mountPath: /etc/pki 59 | readOnly: true 60 | - name: flex 61 | mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec 62 | volumes: 63 | - name: secrets 64 | hostPath: 65 | path: /etc/kubernetes/pki 66 | - name: etc-ssl 67 | hostPath: 68 | path: /etc/ssl/certs 69 | - name: etc-pki 70 | hostPath: 71 | path: /etc/pki 72 | - name: flex 73 | hostPath: 74 | type: DirectoryOrCreate 75 | path: /var/lib/kubelet/volumeplugins 76 | -------------------------------------------------------------------------------- /resources/static-manifests/kube-apiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-apiserver 8 | tier: control-plane 9 | spec: 10 | hostNetwork: true 11 | priorityClassName: system-cluster-critical 12 | securityContext: 13 | runAsNonRoot: true 14 | runAsUser: 65534 15 | seccompProfile: 16 | type: RuntimeDefault 17 | containers: 18 | - name: kube-apiserver 19 | image: ${kube_apiserver_image} 20 | command: 21 | - kube-apiserver 22 | - 
--advertise-address=$(POD_IP) 23 | - --allow-privileged=true 24 | - --anonymous-auth=false 25 | - --authorization-mode=Node,RBAC 26 | - --client-ca-file=/etc/kubernetes/pki/ca.crt 27 | - --enable-admission-plugins=NodeRestriction 28 | - --enable-bootstrap-token-auth=true 29 | - --etcd-cafile=/etc/kubernetes/pki/etcd-client-ca.crt 30 | - --etcd-certfile=/etc/kubernetes/pki/etcd-client.crt 31 | - --etcd-keyfile=/etc/kubernetes/pki/etcd-client.key 32 | - --etcd-servers=${etcd_servers} 33 | - --feature-gates=MutatingAdmissionPolicy=true 34 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.crt 35 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver.key 36 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname${aggregation_flags} 37 | - --runtime-config=admissionregistration.k8s.io/v1beta1=true,admissionregistration.k8s.io/v1alpha1=true 38 | - --secure-port=6443 39 | - --service-account-issuer=${service_account_issuer} 40 | - --service-account-jwks-uri=${service_account_issuer}/openid/v1/jwks 41 | - --service-account-key-file=/etc/kubernetes/pki/service-account.pub 42 | - --service-account-signing-key-file=/etc/kubernetes/pki/service-account.key 43 | - --service-cluster-ip-range=${service_cidr} 44 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt 45 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key 46 | env: 47 | - name: POD_IP 48 | valueFrom: 49 | fieldRef: 50 | fieldPath: status.podIP 51 | resources: 52 | requests: 53 | cpu: 150m 54 | volumeMounts: 55 | - name: secrets 56 | mountPath: /etc/kubernetes/pki 57 | readOnly: true 58 | - name: etc-ssl 59 | mountPath: /etc/ssl/certs 60 | readOnly: true 61 | - name: etc-pki 62 | mountPath: /etc/pki 63 | readOnly: true 64 | volumes: 65 | - name: secrets 66 | hostPath: 67 | path: /etc/kubernetes/pki 68 | - name: etc-ssl 69 | hostPath: 70 | path: /etc/ssl/certs 71 | - name: etc-pki 72 | hostPath: 73 | path: /etc/pki 74 | -------------------------------------------------------------------------------- /auth.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # component kubeconfigs assets map 3 | auth_kubeconfigs = { 4 | "auth/admin.conf" = local.kubeconfig-admin, 5 | "auth/controller-manager.conf" = local.kubeconfig-controller-manager 6 | "auth/scheduler.conf" = local.kubeconfig-scheduler 7 | } 8 | } 9 | 10 | locals { 11 | # Generated admin kubeconfig to bootstrap control plane 12 | kubeconfig-admin = templatefile("${path.module}/resources/kubeconfig-admin", 13 | { 14 | name = var.cluster_name 15 | ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem) 16 | kubelet_cert = base64encode(tls_locally_signed_cert.admin.cert_pem) 17 | kubelet_key = base64encode(tls_private_key.admin.private_key_pem) 18 | server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) 19 | } 20 | ) 21 | 22 | # Generated kube-controller-manager kubeconfig 23 | kubeconfig-controller-manager = templatefile("${path.module}/resources/kubeconfig-admin", 24 | { 25 | name = var.cluster_name 26 | ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem) 27 | kubelet_cert = base64encode(tls_locally_signed_cert.controller-manager.cert_pem) 28 | kubelet_key = base64encode(tls_private_key.controller-manager.private_key_pem) 29 | server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) 30 | } 31 | ) 32 | 33 | # Generated kube-scheduler kubeconfig (same admin template, rendered with the scheduler's client cert) 34 | kubeconfig-scheduler = templatefile("${path.module}/resources/kubeconfig-admin",
36 | name = var.cluster_name 37 | ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem) 38 | kubelet_cert = base64encode(tls_locally_signed_cert.scheduler.cert_pem) 39 | kubelet_key = base64encode(tls_private_key.scheduler.private_key_pem) 40 | server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) 41 | } 42 | ) 43 | 44 | # Generated kubeconfig to bootstrap Kubelets 45 | kubeconfig-bootstrap = templatefile("${path.module}/resources/kubeconfig-bootstrap", 46 | { 47 | ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem) 48 | server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) 49 | token_id = random_password.bootstrap-token-id.result 50 | token_secret = random_password.bootstrap-token-secret.result 51 | } 52 | ) 53 | } 54 | 55 | # Generate a cryptographically random token id (public) 56 | resource "random_password" "bootstrap-token-id" { 57 | length = 6 58 | upper = false 59 | special = false 60 | } 61 | 62 | # Generate a cryptographically random token secret 63 | resource "random_password" "bootstrap-token-secret" { 64 | length = 16 65 | upper = false 66 | special = false 67 | } 68 | 69 | -------------------------------------------------------------------------------- /resources/kube-proxy/kube-proxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: kube-proxy 5 | namespace: kube-system 6 | labels: 7 | tier: node 8 | k8s-app: kube-proxy 9 | spec: 10 | selector: 11 | matchLabels: 12 | tier: node 13 | k8s-app: kube-proxy 14 | updateStrategy: 15 | type: RollingUpdate 16 | rollingUpdate: 17 | maxUnavailable: 1 18 | template: 19 | metadata: 20 | labels: 21 | tier: node 22 | k8s-app: kube-proxy 23 | spec: 24 | hostNetwork: true 25 | priorityClassName: system-node-critical 26 | securityContext: 27 | seccompProfile: 28 | type: RuntimeDefault 29 | serviceAccountName: kube-proxy 30 | tolerations: 31 | - key: node-role.kubernetes.io/controller 32 | operator: Exists 33 | - key: node.kubernetes.io/not-ready 34 | operator: Exists 35 | %{~ for key in daemonset_tolerations ~} 36 | - key: ${key} 37 | operator: Exists 38 | %{~ endfor ~} 39 | containers: 40 | - name: kube-proxy 41 | image: ${kube_proxy_image} 42 | command: 43 | - kube-proxy 44 | - --cluster-cidr=${pod_cidr} 45 | - --hostname-override=$(NODE_NAME) 46 | - --kubeconfig=/etc/kubernetes/kubeconfig 47 | - --metrics-bind-address=0.0.0.0 48 | - --proxy-mode=ipvs 49 | env: 50 | - name: NODE_NAME 51 | valueFrom: 52 | fieldRef: 53 | fieldPath: spec.nodeName 54 | ports: 55 | - name: metrics 56 | containerPort: 10249 57 | - name: health 58 | containerPort: 10256 59 | livenessProbe: 60 | httpGet: 61 | path: /healthz 62 | port: 10256 63 | initialDelaySeconds: 15 64 | timeoutSeconds: 15 65 | securityContext: 66 | privileged: true 67 | volumeMounts: 68 | - name: kubeconfig 69 | mountPath: /etc/kubernetes 70 | readOnly: true 71 | - name: lib-modules 72 | mountPath: /lib/modules 73 | readOnly: true 74 | - name: etc-ssl 75 | mountPath: /etc/ssl/certs 76 | readOnly: true 77 | - name: etc-pki 78 | mountPath: /etc/pki 79 | readOnly: true 80 | - name: xtables-lock 81 | mountPath: /run/xtables.lock 82 | volumes: 83 | - name: kubeconfig 84 | configMap: 85 | name: kubeconfig-in-cluster 86 | - name: lib-modules 87 | hostPath: 88 | path: /lib/modules 89 | - name: etc-ssl 90 | hostPath: 91 | path: /etc/ssl/certs 92 | - name: etc-pki 93 | hostPath: 94 | path: /etc/pki 95 | # Access iptables 
concurrently 96 | - name: xtables-lock 97 | hostPath: 98 | type: FileOrCreate 99 | path: /run/xtables.lock 100 | -------------------------------------------------------------------------------- /resources/cilium/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: cilium-operator 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: RollingUpdate 10 | rollingUpdate: 11 | maxUnavailable: 1 12 | selector: 13 | matchLabels: 14 | name: cilium-operator 15 | template: 16 | metadata: 17 | labels: 18 | name: cilium-operator 19 | spec: 20 | hostNetwork: true 21 | priorityClassName: system-cluster-critical 22 | serviceAccountName: cilium-operator 23 | securityContext: 24 | seccompProfile: 25 | type: RuntimeDefault 26 | tolerations: 27 | - key: node-role.kubernetes.io/controller 28 | operator: Exists 29 | - key: node.kubernetes.io/not-ready 30 | operator: Exists 31 | containers: 32 | - name: cilium-operator 33 | image: ${cilium_operator_image} 34 | command: 35 | - cilium-operator-generic 36 | args: 37 | - --config-dir=/tmp/cilium/config-map 38 | - --debug=$(CILIUM_DEBUG) 39 | env: 40 | - name: K8S_NODE_NAME 41 | valueFrom: 42 | fieldRef: 43 | apiVersion: v1 44 | fieldPath: spec.nodeName 45 | - name: CILIUM_K8S_NAMESPACE 46 | valueFrom: 47 | fieldRef: 48 | apiVersion: v1 49 | fieldPath: metadata.namespace 50 | - name: KUBERNETES_SERVICE_HOST 51 | valueFrom: 52 | configMapKeyRef: 53 | name: in-cluster 54 | key: apiserver-host 55 | - name: KUBERNETES_SERVICE_PORT 56 | valueFrom: 57 | configMapKeyRef: 58 | name: in-cluster 59 | key: apiserver-port 60 | - name: CILIUM_DEBUG 61 | valueFrom: 62 | configMapKeyRef: 63 | name: cilium 64 | key: debug 65 | optional: true 66 | ports: 67 | - name: health 68 | protocol: TCP 69 | containerPort: 9234 70 | livenessProbe: 71 | httpGet: 72 | scheme: HTTP 73 | host: 127.0.0.1 74 | port: 9234 75 | path: /healthz 76 | initialDelaySeconds: 60 77 | periodSeconds: 10 78 | timeoutSeconds: 3 79 | readinessProbe: 80 | httpGet: 81 | scheme: HTTP 82 | host: 127.0.0.1 83 | port: 9234 84 | path: /healthz 85 | periodSeconds: 15 86 | timeoutSeconds: 3 87 | failureThreshold: 5 88 | volumeMounts: 89 | - name: config 90 | mountPath: /tmp/cilium/config-map 91 | readOnly: true 92 | topologySpreadConstraints: 93 | - topologyKey: kubernetes.io/hostname 94 | labelSelector: 95 | matchLabels: 96 | name: cilium-operator 97 | maxSkew: 1 98 | whenUnsatisfiable: DoNotSchedule 99 | volumes: 100 | # Read configuration 101 | - name: config 102 | configMap: 103 | name: cilium 104 | -------------------------------------------------------------------------------- /resources/flannel/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: flannel 5 | namespace: kube-system 6 | labels: 7 | k8s-app: flannel 8 | spec: 9 | selector: 10 | matchLabels: 11 | k8s-app: flannel 12 | updateStrategy: 13 | type: RollingUpdate 14 | rollingUpdate: 15 | maxUnavailable: 1 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: flannel 20 | spec: 21 | hostNetwork: true 22 | priorityClassName: system-node-critical 23 | serviceAccountName: flannel 24 | securityContext: 25 | seccompProfile: 26 | type: RuntimeDefault 27 | tolerations: 28 | - key: node-role.kubernetes.io/controller 29 | operator: Exists 30 | - key: node.kubernetes.io/not-ready 31 | operator: Exists 32 | %{~ for key in 
daemonset_tolerations ~} 33 | - key: ${key} 34 | operator: Exists 35 | %{~ endfor ~} 36 | initContainers: 37 | - name: install-cni 38 | image: ${flannel_cni_image} 39 | command: ["/install-cni.sh"] 40 | env: 41 | - name: CNI_NETWORK_CONFIG 42 | valueFrom: 43 | configMapKeyRef: 44 | name: flannel-config 45 | key: cni-conf.json 46 | volumeMounts: 47 | - name: cni-bin-dir 48 | mountPath: /host/opt/cni/bin/ 49 | - name: cni-conf-dir 50 | mountPath: /host/etc/cni/net.d 51 | containers: 52 | - name: flannel 53 | image: ${flannel_image} 54 | command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface=$(POD_IP)"] 55 | env: 56 | - name: POD_NAME 57 | valueFrom: 58 | fieldRef: 59 | fieldPath: metadata.name 60 | - name: POD_NAMESPACE 61 | valueFrom: 62 | fieldRef: 63 | fieldPath: metadata.namespace 64 | - name: POD_IP 65 | valueFrom: 66 | fieldRef: 67 | fieldPath: status.podIP 68 | securityContext: 69 | privileged: true 70 | resources: 71 | requests: 72 | cpu: 100m 73 | volumeMounts: 74 | - name: flannel-config 75 | mountPath: /etc/kube-flannel/ 76 | - name: run-flannel 77 | mountPath: /run/flannel 78 | - name: xtables-lock 79 | mountPath: /run/xtables.lock 80 | volumes: 81 | - name: flannel-config 82 | configMap: 83 | name: flannel-config 84 | - name: run-flannel 85 | hostPath: 86 | path: /run/flannel 87 | # Used by install-cni 88 | - name: cni-bin-dir 89 | hostPath: 90 | path: /opt/cni/bin 91 | - name: cni-conf-dir 92 | hostPath: 93 | type: DirectoryOrCreate 94 | path: /etc/cni/net.d 95 | # Access iptables concurrently 96 | - name: xtables-lock 97 | hostPath: 98 | type: FileOrCreate 99 | path: /run/xtables.lock 100 | -------------------------------------------------------------------------------- /manifests.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Kubernetes static pod manifests map 3 | # {static-manifests/manifest.yaml => content } 4 | static_manifests = { 5 | for name in fileset("${path.module}/resources/static-manifests", "*.yaml") : 6 | "static-manifests/${name}" => templatefile( 7 | "${path.module}/resources/static-manifests/${name}", 8 | { 9 | kube_apiserver_image = var.container_images["kube_apiserver"] 10 | kube_controller_manager_image = var.container_images["kube_controller_manager"] 11 | kube_scheduler_image = var.container_images["kube_scheduler"] 12 | 13 | etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers)) 14 | pod_cidr = var.pod_cidr 15 | service_cidr = var.service_cidr 16 | 17 | service_account_issuer = var.service_account_issuer 18 | aggregation_flags = var.enable_aggregation ? 
indent(4, local.aggregation_flags) : "" 19 | } 20 | ) 21 | } 22 | 23 | # Kubernetes control plane manifests map 24 | # { manifests/manifest.yaml => content } 25 | manifests = merge({ 26 | for name in fileset("${path.module}/resources/manifests", "**/*.yaml") : 27 | "manifests/${name}" => templatefile( 28 | "${path.module}/resources/manifests/${name}", 29 | { 30 | server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) 31 | apiserver_host = var.api_servers[0] 32 | apiserver_port = var.external_apiserver_port 33 | token_id = random_password.bootstrap-token-id.result 34 | token_secret = random_password.bootstrap-token-secret.result 35 | } 36 | ) 37 | }, 38 | # CoreDNS manifests (optional) 39 | { 40 | for name in fileset("${path.module}/resources/coredns", "*.yaml") : 41 | "manifests/coredns/${name}" => templatefile( 42 | "${path.module}/resources/coredns/${name}", 43 | { 44 | coredns_image = var.container_images["coredns"] 45 | control_plane_replicas = max(2, length(var.etcd_servers)) 46 | cluster_domain_suffix = var.cluster_domain_suffix 47 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 48 | } 49 | ) if var.components.enable && var.components.coredns.enable 50 | }, 51 | # kube-proxy manifests (optional) 52 | { 53 | for name in fileset("${path.module}/resources/kube-proxy", "*.yaml") : 54 | "manifests/kube-proxy/${name}" => templatefile( 55 | "${path.module}/resources/kube-proxy/${name}", 56 | { 57 | kube_proxy_image = var.container_images["kube_proxy"] 58 | pod_cidr = var.pod_cidr 59 | daemonset_tolerations = var.daemonset_tolerations 60 | } 61 | ) if var.components.enable && var.components.kube_proxy.enable && var.networking != "cilium" 62 | } 63 | ) 64 | } 65 | 66 | locals { 67 | aggregation_flags = <