├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── config.yml ├── dependabot.yaml ├── issue_template.md ├── release.yaml └── workflows │ └── publish.yaml ├── .gitignore ├── CHANGES.md ├── CONTRIBUTING.md ├── DCO ├── LICENSE ├── README.md ├── addons ├── cilium │ ├── cluster-role-binding.tf │ ├── cluster-role.tf │ ├── config.tf │ ├── daemonset.tf │ ├── deployment.tf │ ├── service-account.tf │ ├── variables.tf │ └── versions.tf ├── coredns │ ├── cluster-role.tf │ ├── config.tf │ ├── deployment.tf │ ├── service-account.tf │ ├── service.tf │ ├── variables.tf │ └── versions.tf ├── flannel │ ├── cluster-role-binding.tf │ ├── cluster-role.tf │ ├── config.tf │ ├── daemonset.tf │ ├── service-account.tf │ ├── variables.tf │ └── versions.tf ├── grafana │ ├── config.yaml │ ├── dashboards-coredns.yaml │ ├── dashboards-etcd.yaml │ ├── dashboards-k8s-network.yaml │ ├── dashboards-k8s-nodes.yaml │ ├── dashboards-k8s-resources-1.yaml │ ├── dashboards-k8s-resources-2.yaml │ ├── dashboards-k8s.yaml │ ├── dashboards-nginx-ingress.yaml │ ├── dashboards-node-exporter.yaml │ ├── dashboards-prom.yaml │ ├── datasources.yaml │ ├── deployment.yaml │ ├── providers.yaml │ └── service.yaml ├── nginx-ingress │ ├── aws │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── deployment.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── cluster-role.yaml │ │ │ ├── role-binding.yaml │ │ │ └── role.yaml │ │ └── service.yaml │ ├── azure │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── deployment.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── cluster-role.yaml │ │ │ ├── role-binding.yaml │ │ │ └── role.yaml │ │ └── service.yaml │ ├── bare-metal │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── deployment.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── cluster-role.yaml │ │ │ ├── role-binding.yaml │ │ │ └── role.yaml │ │ └── service.yaml │ ├── digital-ocean │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── daemonset.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── cluster-role.yaml │ │ │ ├── role-binding.yaml │ │ │ └── role.yaml │ │ └── service.yaml │ └── google-cloud │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── deployment.yaml │ │ ├── rbac │ │ ├── cluster-role-binding.yaml │ │ ├── cluster-role.yaml │ │ ├── role-binding.yaml │ │ └── role.yaml │ │ └── service.yaml └── prometheus │ ├── 0-namespace.yaml │ ├── config.yaml │ ├── deployment.yaml │ ├── discovery │ ├── kube-controller-manager.yaml │ ├── kube-proxy.yaml │ └── kube-scheduler.yaml │ ├── exporters │ ├── kube-state-metrics │ │ ├── cluster-role-binding.yaml │ │ ├── cluster-role.yaml │ │ ├── deployment.yaml │ │ ├── service-account.yaml │ │ └── service.yaml │ └── node-exporter │ │ ├── daemonset.yaml │ │ ├── service-account.yaml │ │ └── service.yaml │ ├── network-policy.yaml │ ├── rbac │ ├── cluster-role-binding.yaml │ └── cluster-role.yaml │ ├── rules.yaml │ ├── service-account.yaml │ └── service.yaml ├── aws ├── fedora-coreos │ └── kubernetes │ │ ├── LICENSE │ │ ├── README.md │ │ ├── ami.tf │ │ ├── bootstrap.tf │ │ ├── butane │ │ └── controller.yaml │ │ ├── controllers.tf │ │ ├── network.tf │ │ ├── nlb.tf │ │ ├── outputs.tf │ │ ├── security.tf │ │ ├── ssh.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ ├── workers.tf │ │ └── workers │ │ ├── ami.tf │ │ ├── butane │ │ └── worker.yaml │ │ ├── ingress.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── workers.tf └── flatcar-linux │ └── kubernetes │ ├── LICENSE │ ├── README.md │ ├── ami.tf │ ├── bootstrap.tf │ ├── butane 
│ └── controller.yaml │ ├── controllers.tf │ ├── network.tf │ ├── nlb.tf │ ├── outputs.tf │ ├── security.tf │ ├── ssh.tf │ ├── variables.tf │ ├── versions.tf │ ├── workers.tf │ └── workers │ ├── ami.tf │ ├── butane │ └── worker.yaml │ ├── ingress.tf │ ├── outputs.tf │ ├── variables.tf │ ├── versions.tf │ └── workers.tf ├── azure ├── fedora-coreos │ └── kubernetes │ │ ├── LICENSE │ │ ├── README.md │ │ ├── bootstrap.tf │ │ ├── butane │ │ └── controller.yaml │ │ ├── controllers.tf │ │ ├── lb.tf │ │ ├── locals.tf │ │ ├── network.tf │ │ ├── outputs.tf │ │ ├── security.tf │ │ ├── ssh.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ ├── workers.tf │ │ └── workers │ │ ├── butane │ │ └── worker.yaml │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── workers.tf └── flatcar-linux │ └── kubernetes │ ├── LICENSE │ ├── README.md │ ├── bootstrap.tf │ ├── butane │ └── controller.yaml │ ├── controllers.tf │ ├── lb.tf │ ├── locals.tf │ ├── network.tf │ ├── outputs.tf │ ├── security.tf │ ├── ssh.tf │ ├── variables.tf │ ├── versions.tf │ ├── workers.tf │ └── workers │ ├── butane │ └── worker.yaml │ ├── variables.tf │ ├── versions.tf │ └── workers.tf ├── bare-metal ├── fedora-coreos │ └── kubernetes │ │ ├── LICENSE │ │ ├── README.md │ │ ├── bootstrap.tf │ │ ├── butane │ │ └── controller.yaml │ │ ├── outputs.tf │ │ ├── profiles.tf │ │ ├── ssh.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ ├── worker │ │ ├── butane │ │ │ └── worker.yaml │ │ ├── matchbox.tf │ │ ├── ssh.tf │ │ ├── variables.tf │ │ └── versions.tf │ │ └── workers.tf └── flatcar-linux │ └── kubernetes │ ├── LICENSE │ ├── README.md │ ├── bootstrap.tf │ ├── butane │ ├── controller.yaml │ └── install.yaml │ ├── outputs.tf │ ├── profiles.tf │ ├── ssh.tf │ ├── variables.tf │ ├── versions.tf │ ├── worker │ ├── butane │ │ ├── install.yaml │ │ └── worker.yaml │ ├── matchbox.tf │ ├── ssh.tf │ ├── variables.tf │ └── versions.tf │ └── workers.tf ├── digital-ocean ├── fedora-coreos │ └── kubernetes │ │ ├── LICENSE │ │ ├── README.md │ │ ├── bootstrap.tf │ │ ├── butane │ │ ├── controller.yaml │ │ └── worker.yaml │ │ ├── controllers.tf │ │ ├── network.tf │ │ ├── outputs.tf │ │ ├── ssh.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── workers.tf └── flatcar-linux │ └── kubernetes │ ├── LICENSE │ ├── README.md │ ├── bootstrap.tf │ ├── butane │ ├── controller.yaml │ └── worker.yaml │ ├── controllers.tf │ ├── network.tf │ ├── outputs.tf │ ├── ssh.tf │ ├── variables.tf │ ├── versions.tf │ └── workers.tf ├── docs ├── CNAME ├── addons │ ├── fleetlock.md │ ├── grafana.md │ ├── ingress.md │ ├── overview.md │ └── prometheus.md ├── advanced │ ├── arm64.md │ ├── customization.md │ ├── nodes.md │ ├── overview.md │ └── worker-pools.md ├── announce.md ├── architecture │ ├── aws.md │ ├── azure.md │ ├── bare-metal.md │ ├── concepts.md │ ├── digitalocean.md │ ├── google-cloud.md │ └── operating-systems.md ├── fedora-coreos │ ├── aws.md │ ├── azure.md │ ├── bare-metal.md │ ├── digitalocean.md │ └── google-cloud.md ├── flatcar-linux │ ├── aws.md │ ├── azure.md │ ├── bare-metal.md │ ├── digitalocean.md │ └── google-cloud.md ├── img │ ├── favicon.ico │ ├── grafana-etcd.png │ ├── grafana-resources-cluster.png │ ├── grafana-usage-cluster.png │ ├── grafana-usage-node.png │ ├── prometheus-alerts.png │ ├── prometheus-graph.png │ ├── prometheus-targets.png │ ├── spin.png │ ├── typhoon-aws-load-balancing.png │ ├── typhoon-azure-load-balancing.png │ ├── typhoon-digitalocean-load-balancing.png │ ├── typhoon-gcp-load-balancing.png │ ├── typhoon-logo.png │ └── typhoon.png ├── index.md └── topics 
│ ├── faq.md │ ├── hardware.md │ ├── maintenance.md │ ├── performance.md │ └── security.md ├── google-cloud ├── fedora-coreos │ └── kubernetes │ │ ├── LICENSE │ │ ├── README.md │ │ ├── apiserver.tf │ │ ├── bootstrap.tf │ │ ├── butane │ │ └── controller.yaml │ │ ├── controllers.tf │ │ ├── image.tf │ │ ├── ingress.tf │ │ ├── network.tf │ │ ├── outputs.tf │ │ ├── ssh.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ ├── workers.tf │ │ └── workers │ │ ├── butane │ │ └── worker.yaml │ │ ├── image.tf │ │ ├── outputs.tf │ │ ├── target_pool.tf │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── workers.tf └── flatcar-linux │ └── kubernetes │ ├── LICENSE │ ├── README.md │ ├── apiserver.tf │ ├── bootstrap.tf │ ├── butane │ └── controller.yaml │ ├── controllers.tf │ ├── image.tf │ ├── ingress.tf │ ├── network.tf │ ├── outputs.tf │ ├── ssh.tf │ ├── variables.tf │ ├── versions.tf │ ├── workers.tf │ └── workers │ ├── butane │ └── worker.yaml │ ├── image.tf │ ├── outputs.tf │ ├── target_pool.tf │ ├── variables.tf │ ├── versions.tf │ └── workers.tf ├── mkdocs.yml ├── requirements.txt └── theme └── main.html /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [poseidon] 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report a bug to improve the project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Description** 13 | 14 | A clear and concise description of what the bug is. 15 | 16 | **Steps to Reproduce** 17 | 18 | Provide clear steps to reproduce the bug. 19 | 20 | - [ ] Relevant error messages if appropriate (concise, not a dump of everything). 21 | - [ ] Explored using a vanilla cluster from the [tutorials](https://typhoon.psdn.io/#documentation). Ruled out [customizations](https://typhoon.psdn.io/advanced/customization/). 22 | 23 | **Expected behavior** 24 | 25 | A clear and concise description of what you expected to happen. 26 | 27 | **Environment** 28 | 29 | * Platform: aws, azure, bare-metal, google-cloud, digital-ocean 30 | * OS: fedora-coreos, flatcar-linux (include release version) 31 | * Release: Typhoon version or Git SHA (reporting latest is **not** helpful) 32 | * Terraform: `terraform version` (reporting latest is **not** helpful) 33 | * Plugins: Provider plugin versions (reporting latest is **not** helpful) 34 | 35 | **Possible Solution** 36 | 37 | 38 | 39 | Link to a PR or description. 
40 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Security 4 | url: https://typhoon.psdn.io/topics/security/ 5 | about: Report security vulnerabilities 6 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | -------------------------------------------------------------------------------- /.github/issue_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Enhancement 4 | 5 | ### Overview 6 | 7 | One paragraph explanation of the enhancement. 8 | 9 | ### Motivation 10 | 11 | Describe the motivation and what problem this solves. 12 | 13 | ### Tradeoffs 14 | 15 | What are the pros and cons of this feature? How will it be exercised and maintained? 16 | -------------------------------------------------------------------------------- /.github/release.yaml: -------------------------------------------------------------------------------- 1 | changelog: 2 | categories: 3 | - title: Contributions 4 | labels: 5 | - '*' 6 | exclude: 7 | labels: 8 | - dependencies 9 | - no-release-note 10 | - title: Dependencies 11 | labels: 12 | - dependencies 13 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: publish 2 | on: 3 | push: 4 | branches: 5 | - release-docs 6 | jobs: 7 | mkdocs: 8 | name: mkdocs 9 | uses: poseidon/matchbox/.github/workflows/mkdocs-pages.yaml@main 10 | # Add content write for GitHub Pages 11 | permissions: 12 | contents: write 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | site/ 2 | venv/ 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Developer Certificate of Origin 4 | 5 | By contributing, you agree to the Linux Foundation's Developer Certificate of Origin ([DCO](DCO)). The DCO is a statement that you, the contributor, have the legal right to make your contribution and understand the contribution will be distributed as part of this project. 6 | -------------------------------------------------------------------------------- /DCO: -------------------------------------------------------------------------------- 1 | Developer Certificate of Origin 2 | Version 1.1 3 | 4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 5 | 1 Letterman Drive 6 | Suite D4700 7 | San Francisco, CA, 94129 8 | 9 | Everyone is permitted to copy and distribute verbatim copies of this 10 | license document, but changing it is not allowed. 
11 | 12 | 13 | Developer's Certificate of Origin 1.1 14 | 15 | By making a contribution to this project, I certify that: 16 | 17 | (a) The contribution was created in whole or in part by me and I 18 | have the right to submit it under the open source license 19 | indicated in the file; or 20 | 21 | (b) The contribution is based upon previous work that, to the best 22 | of my knowledge, is covered under an appropriate open source 23 | license and I have the right under that license to submit that 24 | work with modifications, whether created in whole or in part 25 | by me, under the same open source license (unless I am 26 | permitted to submit under a different license), as indicated 27 | in the file; or 28 | 29 | (c) The contribution was provided directly to me by some other 30 | person who certified (a), (b) or (c) and I have not modified 31 | it. 32 | 33 | (d) I understand and agree that this project and the contribution 34 | are public and that a record of the contribution (including all 35 | personal information I submit with it, including my sign-off) is 36 | maintained indefinitely and may be redistributed consistent with 37 | this project or the open source license(s) involved. 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | 24 | -------------------------------------------------------------------------------- /addons/cilium/cluster-role-binding.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_cluster_role_binding" "operator" { 2 | metadata { 3 | name = "cilium-operator" 4 | } 5 | 6 | role_ref { 7 | api_group = "rbac.authorization.k8s.io" 8 | kind = "ClusterRole" 9 | name = "cilium-operator" 10 | } 11 | 12 | subject { 13 | kind = "ServiceAccount" 14 | name = "cilium-operator" 15 | namespace = "kube-system" 16 | } 17 | } 18 | 19 | resource "kubernetes_cluster_role_binding" "agent" { 20 | metadata { 21 | name = "cilium-agent" 22 | } 23 | 24 | role_ref { 25 | api_group = "rbac.authorization.k8s.io" 26 | kind = "ClusterRole" 27 | name = "cilium-agent" 28 | } 29 | 30 | subject { 31 | kind = "ServiceAccount" 32 | name = "cilium-agent" 33 | namespace = "kube-system" 34 | } 35 | } 36 | 37 | -------------------------------------------------------------------------------- /addons/cilium/service-account.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_service_account" "operator" { 2 | metadata { 3 | name = "cilium-operator" 4 | namespace = "kube-system" 5 | } 6 | automount_service_account_token = false 7 | } 8 | 9 | resource "kubernetes_service_account" "agent" { 10 | metadata { 11 | name = "cilium-agent" 12 | namespace = "kube-system" 13 | } 14 | automount_service_account_token = false 15 | } 16 | -------------------------------------------------------------------------------- /addons/cilium/variables.tf: -------------------------------------------------------------------------------- 1 | variable "pod_cidr" { 2 | type = string 3 | description = "CIDR IP range to assign Kubernetes pods" 4 | default = "10.20.0.0/14" 5 | } 6 | 7 | variable "daemonset_tolerations" { 8 | type = list(string) 9 | description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. 
['custom-role', 'gpu-role'])" 10 | default = [] 11 | } 12 | 13 | variable "enable_hubble" { 14 | type = bool 15 | description = "Run the embedded Hubble Server and mount hubble-server-certs Secret" 16 | default = true 17 | } 18 | -------------------------------------------------------------------------------- /addons/cilium/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | kubernetes = { 4 | source = "hashicorp/kubernetes" 5 | version = "~> 2.8" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /addons/coredns/cluster-role.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_cluster_role" "coredns" { 2 | metadata { 3 | name = "system:coredns" 4 | } 5 | rule { 6 | api_groups = [""] 7 | resources = [ 8 | "endpoints", 9 | "services", 10 | "pods", 11 | "namespaces", 12 | ] 13 | verbs = [ 14 | "list", 15 | "watch", 16 | ] 17 | } 18 | rule { 19 | api_groups = [""] 20 | resources = [ 21 | "nodes", 22 | ] 23 | verbs = [ 24 | "get", 25 | ] 26 | } 27 | rule { 28 | api_groups = ["discovery.k8s.io"] 29 | resources = [ 30 | "endpointslices", 31 | ] 32 | verbs = [ 33 | "list", 34 | "watch", 35 | ] 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /addons/coredns/config.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_config_map" "coredns" { 2 | metadata { 3 | name = "coredns" 4 | namespace = "kube-system" 5 | } 6 | data = { 7 | "Corefile" = <<-EOF 8 | .:53 { 9 | errors 10 | health { 11 | lameduck 5s 12 | } 13 | ready 14 | log . { 15 | class error 16 | } 17 | kubernetes ${var.cluster_domain_suffix} in-addr.arpa ip6.arpa { 18 | pods insecure 19 | fallthrough in-addr.arpa ip6.arpa 20 | } 21 | prometheus :9153 22 | forward . 
/etc/resolv.conf 23 | cache 30 24 | loop 25 | reload 26 | loadbalance 27 | } 28 | EOF 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /addons/coredns/service-account.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_service_account" "coredns" { 2 | metadata { 3 | name = "coredns" 4 | namespace = "kube-system" 5 | } 6 | automount_service_account_token = false 7 | } 8 | 9 | 10 | resource "kubernetes_cluster_role_binding" "coredns" { 11 | metadata { 12 | name = "system:coredns" 13 | } 14 | role_ref { 15 | api_group = "rbac.authorization.k8s.io" 16 | kind = "ClusterRole" 17 | name = "system:coredns" 18 | } 19 | subject { 20 | kind = "ServiceAccount" 21 | name = "coredns" 22 | namespace = "kube-system" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /addons/coredns/service.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_service" "coredns" { 2 | metadata { 3 | name = "coredns" 4 | namespace = "kube-system" 5 | labels = { 6 | "k8s-app" = "coredns" 7 | "kubernetes.io/name" = "CoreDNS" 8 | } 9 | annotations = { 10 | "prometheus.io/scrape" = "true" 11 | "prometheus.io/port" = "9153" 12 | } 13 | } 14 | spec { 15 | type = "ClusterIP" 16 | cluster_ip = var.cluster_dns_service_ip 17 | selector = { 18 | k8s-app = "coredns" 19 | } 20 | port { 21 | name = "dns" 22 | protocol = "UDP" 23 | port = 53 24 | } 25 | port { 26 | name = "dns-tcp" 27 | protocol = "TCP" 28 | port = 53 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /addons/coredns/variables.tf: -------------------------------------------------------------------------------- 1 | variable "replicas" { 2 | type = number 3 | description = "CoreDNS replica count" 4 | default = 2 5 | } 6 | 7 | variable "cluster_dns_service_ip" { 8 | description = "Must be set to `cluster_dns_service_ip` output by cluster" 9 | default = "10.3.0.10" 10 | } 11 | 12 | variable "cluster_domain_suffix" { 13 | description = "Must be set to `cluster_domain_suffix` output by cluster" 14 | default = "cluster.local" 15 | } 16 | -------------------------------------------------------------------------------- /addons/coredns/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | kubernetes = { 4 | source = "hashicorp/kubernetes" 5 | version = "~> 2.8" 6 | } 7 | } 8 | } 9 | 10 | -------------------------------------------------------------------------------- /addons/flannel/cluster-role-binding.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_cluster_role_binding" "flannel" { 2 | metadata { 3 | name = "flannel" 4 | } 5 | 6 | role_ref { 7 | api_group = "rbac.authorization.k8s.io" 8 | kind = "ClusterRole" 9 | name = "flannel" 10 | } 11 | 12 | subject { 13 | kind = "ServiceAccount" 14 | name = "flannel" 15 | namespace = "kube-system" 16 | } 17 | } 18 | 19 | -------------------------------------------------------------------------------- /addons/flannel/cluster-role.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_cluster_role" "flannel" { 2 | metadata { 3 | name = "flannel" 4 | } 5 | 6 | rule { 7 | api_groups = [""] 8 | resources = ["pods"] 9 | verbs = ["get"] 10 | } 11 | 12 | rule { 13 | api_groups = [""] 
14 | resources = ["nodes"] 15 | verbs = ["list", "watch"] 16 | } 17 | 18 | rule { 19 | api_groups = [""] 20 | resources = ["nodes/status"] 21 | verbs = ["patch"] 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /addons/flannel/config.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_config_map" "config" { 2 | metadata { 3 | name = "flannel-config" 4 | namespace = "kube-system" 5 | labels = { 6 | k8s-app = "flannel" 7 | tier = "node" 8 | } 9 | } 10 | 11 | data = { 12 | "cni-conf.json" = <<-EOF 13 | { 14 | "name": "cbr0", 15 | "cniVersion": "0.3.1", 16 | "plugins": [ 17 | { 18 | "type": "flannel", 19 | "delegate": { 20 | "hairpinMode": true, 21 | "isDefaultGateway": true 22 | } 23 | }, 24 | { 25 | "type": "portmap", 26 | "capabilities": { 27 | "portMappings": true 28 | } 29 | } 30 | ] 31 | } 32 | EOF 33 | "net-conf.json" = <<-EOF 34 | { 35 | "Network": "${var.pod_cidr}", 36 | "Backend": { 37 | "Type": "vxlan", 38 | "Port": 8472 39 | } 40 | } 41 | EOF 42 | } 43 | } 44 | 45 | -------------------------------------------------------------------------------- /addons/flannel/service-account.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_service_account" "flannel" { 2 | metadata { 3 | name = "flannel" 4 | namespace = "kube-system" 5 | } 6 | } 7 | 8 | -------------------------------------------------------------------------------- /addons/flannel/variables.tf: -------------------------------------------------------------------------------- 1 | variable "pod_cidr" { 2 | type = string 3 | description = "CIDR IP range to assign Kubernetes pods" 4 | default = "10.20.0.0/14" 5 | } 6 | 7 | variable "daemonset_tolerations" { 8 | type = list(string) 9 | description = "List of additional taint keys kube-system DaemonSets should tolerate (e.g. 
['custom-role', 'gpu-role'])" 10 | default = [] 11 | } 12 | -------------------------------------------------------------------------------- /addons/flannel/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | kubernetes = { 4 | source = "hashicorp/kubernetes" 5 | version = "~> 2.8" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /addons/grafana/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-config 5 | namespace: monitoring 6 | data: 7 | custom.ini: |+ 8 | [server] 9 | http_port = 8080 10 | 11 | [paths] 12 | data = /var/lib/grafana 13 | plugins = /var/lib/grafana/plugins 14 | provisioning = /etc/grafana/provisioning 15 | 16 | [users] 17 | allow_sign_up = false 18 | allow_org_create = false 19 | # viewers can edit/inspect, but not save 20 | viewers_can_edit = true 21 | 22 | # Disable login form, since Grafana always creates an admin user 23 | [auth] 24 | disable_login_form = true 25 | 26 | # Disable the user/pass login system 27 | [auth.basic] 28 | enabled = false 29 | 30 | # Allow anonymous authentication with view-only authorization 31 | [auth.anonymous] 32 | enabled = true 33 | org_role = Viewer 34 | 35 | [analytics] 36 | reporting_enabled = false 37 | -------------------------------------------------------------------------------- /addons/grafana/datasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-datasources 5 | namespace: monitoring 6 | data: 7 | prometheus.yaml: |+ 8 | apiVersion: 1 9 | datasources: 10 | - name: prometheus 11 | type: prometheus 12 | access: proxy 13 | url: http://prometheus.monitoring.svc.cluster.local 14 | version: 1 15 | editable: false 16 | loki.yaml: |+ 17 | apiVersion: 1 18 | datasources: 19 | - name: loki 20 | type: loki 21 | access: proxy 22 | url: http://loki.monitoring.svc.cluster.local 23 | version: 1 24 | editable: false 25 | -------------------------------------------------------------------------------- /addons/grafana/providers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-providers 5 | namespace: monitoring 6 | data: 7 | providers.yaml: |+ 8 | apiVersion: 1 9 | providers: 10 | - name: 'default' 11 | orgId: 1 12 | folder: '' 13 | type: file 14 | options: 15 | path: /etc/grafana/dashboards 16 | -------------------------------------------------------------------------------- /addons/grafana/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '8080' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: grafana 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 8080 19 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | 
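Note that the cilium, coredns, and flannel addons above are Terraform modules, while the grafana, nginx-ingress, and prometheus addons ship as raw Kubernetes manifests meant to be applied directly. For a uniform Terraform workflow, a namespace like the one above could also be managed with the hashicorp/kubernetes provider — a minimal sketch, not a file from this repo:

resource "kubernetes_namespace" "ingress" {
  metadata {
    name = "ingress"
    labels = {
      name = "ingress"
    }
  }
}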
-------------------------------------------------------------------------------- /addons/nginx-ingress/aws/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/public 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: nginx-ingress-controller 26 | image: registry.k8s.io/ingress-nginx/controller:v1.5.1 27 | args: 28 | - /nginx-ingress-controller 29 | - --controller-class=k8s.io/public 30 | - --ingress-class=public 31 | # use downward API 32 | env: 33 | - name: POD_NAME 34 | valueFrom: 35 | fieldRef: 36 | fieldPath: metadata.name 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | ports: 42 | - name: http 43 | containerPort: 80 44 | hostPort: 80 45 | - name: https 46 | containerPort: 443 47 | hostPort: 443 48 | - name: health 49 | containerPort: 10254 50 | hostPort: 10254 51 | livenessProbe: 52 | httpGet: 53 | path: /healthz 54 | port: 10254 55 | scheme: HTTP 56 | initialDelaySeconds: 10 57 | periodSeconds: 10 58 | successThreshold: 1 59 | failureThreshold: 3 60 | timeoutSeconds: 5 61 | readinessProbe: 62 | httpGet: 63 | path: /healthz 64 | port: 10254 65 | scheme: HTTP 66 | periodSeconds: 10 67 | successThreshold: 1 68 | failureThreshold: 3 69 | timeoutSeconds: 5 70 | lifecycle: 71 | preStop: 72 | exec: 73 | command: 74 | - /wait-shutdown 75 | securityContext: 76 | capabilities: 77 | add: 78 | - NET_BIND_SERVICE 79 | drop: 80 | - ALL 81 | runAsUser: 101 # www-data 82 | restartPolicy: Always 83 | terminationGracePeriodSeconds: 300 84 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | 
verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | - apiGroups: 63 | - discovery.k8s.io 64 | resources: 65 | - "endpointslices" 66 | verbs: 67 | - get 68 | - list 69 | - watch 70 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | - endpoints 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - configmaps 20 | resourceNames: 21 | # Defaults to "<election-id>-<ingress-class>" 22 | # Here: "<ingress-controller-leader>-<nginx>" 23 | # This has to be adapted if you change either parameter 24 | # when launching the nginx-ingress-controller. 
25 | - "ingress-controller-leader-public" 26 | verbs: 27 | - get 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - configmaps 33 | verbs: 34 | - create 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - endpoints 39 | verbs: 40 | - get 41 | - apiGroups: 42 | - "coordination.k8s.io" 43 | resources: 44 | - leases 45 | verbs: 46 | - create 47 | - get 48 | - update 49 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/public 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: nginx-ingress-controller 26 | image: registry.k8s.io/ingress-nginx/controller:v1.5.1 27 | args: 28 | - /nginx-ingress-controller 29 | - --controller-class=k8s.io/public 30 | - --ingress-class=public 31 | # use downward API 32 | env: 33 | - name: POD_NAME 34 | valueFrom: 35 | fieldRef: 36 | fieldPath: metadata.name 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | ports: 42 | - name: http 43 | containerPort: 80 44 | hostPort: 80 45 | - name: https 46 | containerPort: 443 47 | hostPort: 443 48 | - name: health 49 | containerPort: 10254 50 | hostPort: 10254 51 | livenessProbe: 52 | httpGet: 53 | path: /healthz 54 | port: 10254 55 | scheme: HTTP 56 | initialDelaySeconds: 10 57 | periodSeconds: 10 58 | successThreshold: 1 59 | failureThreshold: 3 60 | timeoutSeconds: 5 61 | readinessProbe: 62 | httpGet: 63 | path: /healthz 64 | port: 10254 65 | scheme: HTTP 66 | periodSeconds: 10 67 | successThreshold: 1 68 | failureThreshold: 3 69 | timeoutSeconds: 5 70 | lifecycle: 71 | preStop: 72 | exec: 73 | command: 74 | - /wait-shutdown 75 | securityContext: 76 | capabilities: 77 | add: 78 | - NET_BIND_SERVICE 79 | 
drop: 80 | - ALL 81 | runAsUser: 101 # www-data 82 | restartPolicy: Always 83 | terminationGracePeriodSeconds: 300 84 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | - apiGroups: 63 | - discovery.k8s.io 64 | resources: 65 | - "endpointslices" 66 | verbs: 67 | - get 68 | - list 69 | - watch 70 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | - endpoints 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - configmaps 20 | resourceNames: 21 | # Defaults to "<election-id>-<ingress-class>" 22 | # Here: "<ingress-controller-leader>-<nginx>" 23 | # This has to be adapted if you change either parameter 24 | # when launching the nginx-ingress-controller. 
25 | - "ingress-controller-leader-public" 26 | verbs: 27 | - get 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - configmaps 33 | verbs: 34 | - create 35 | - apiGroups: 36 | - "coordination.k8s.io" 37 | resources: 38 | - leases 39 | verbs: 40 | - create 41 | - get 42 | - update 43 | 44 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/public 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: nginx-ingress-controller 26 | image: registry.k8s.io/ingress-nginx/controller:v1.5.1 27 | args: 28 | - /nginx-ingress-controller 29 | - --controller-class=k8s.io/public 30 | - --ingress-class=public 31 | # use downward API 32 | env: 33 | - name: POD_NAME 34 | valueFrom: 35 | fieldRef: 36 | fieldPath: metadata.name 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | ports: 42 | - name: http 43 | containerPort: 80 44 | - name: https 45 | containerPort: 443 46 | - name: health 47 | containerPort: 10254 48 | livenessProbe: 49 | httpGet: 50 | path: /healthz 51 | port: 10254 52 | scheme: HTTP 53 | initialDelaySeconds: 10 54 | periodSeconds: 10 55 | successThreshold: 1 56 | failureThreshold: 3 57 | timeoutSeconds: 5 58 | readinessProbe: 59 | httpGet: 60 | path: /healthz 61 | port: 10254 62 | scheme: HTTP 63 | periodSeconds: 10 64 | successThreshold: 1 65 | failureThreshold: 3 66 | timeoutSeconds: 5 67 | lifecycle: 68 | preStop: 69 | exec: 70 | command: 71 | - /wait-shutdown 72 | securityContext: 73 | capabilities: 74 | add: 75 | - NET_BIND_SERVICE 76 | drop: 77 | - ALL 78 | runAsUser: 101 # www-data 79 | restartPolicy: Always 80 | terminationGracePeriodSeconds: 300 81 | 
-------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | - apiGroups: 63 | - discovery.k8s.io 64 | resources: 65 | - "endpointslices" 66 | verbs: 67 | - get 68 | - list 69 | - watch 70 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | - endpoints 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - configmaps 20 | resourceNames: 21 | # Defaults to "<election-id>-<ingress-class>" 22 | # Here: "<ingress-controller-leader>-<nginx>" 23 | # This has to be adapted if you change either parameter 24 | # when launching the nginx-ingress-controller. 
25 | - "ingress-controller-leader-public" 26 | verbs: 27 | - get 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - configmaps 33 | verbs: 34 | - create 35 | - apiGroups: 36 | - "coordination.k8s.io" 37 | resources: 38 | - leases 39 | verbs: 40 | - create 41 | - get 42 | - update 43 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | clusterIP: 10.3.0.12 12 | selector: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | ports: 16 | - name: http 17 | protocol: TCP 18 | port: 80 19 | targetPort: 80 20 | - name: https 21 | protocol: TCP 22 | port: 443 23 | targetPort: 443 24 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/public 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | updateStrategy: 8 | type: RollingUpdate 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: nginx-ingress-controller 26 | image: registry.k8s.io/ingress-nginx/controller:v1.5.1 27 | args: 28 | - /nginx-ingress-controller 29 | - --controller-class=k8s.io/public 30 | - --ingress-class=public 31 | # use downward API 32 | env: 33 | - name: POD_NAME 34 | valueFrom: 35 | fieldRef: 36 | fieldPath: metadata.name 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | ports: 42 | - name: http 43 | containerPort: 80 44 | hostPort: 80 45 | - name: https 46 | containerPort: 443 47 | hostPort: 443 48 | - name: health 49 | containerPort: 10254 50 | hostPort: 10254 51 | livenessProbe: 52 | httpGet: 53 | path: /healthz 54 | port: 10254 55 | scheme: HTTP 56 | initialDelaySeconds: 10 57 | periodSeconds: 10 58 | successThreshold: 1 59 | failureThreshold: 3 60 | timeoutSeconds: 5 61 | readinessProbe: 62 | httpGet: 63 | path: /healthz 64 | port: 10254 65 | scheme: HTTP 66 | periodSeconds: 10 67 | successThreshold: 1 68 | failureThreshold: 3 69 | timeoutSeconds: 5 70 | lifecycle: 71 | preStop: 72 | exec: 73 | command: 74 | - /wait-shutdown 75 | securityContext: 76 | capabilities: 77 | add: 78 | - NET_BIND_SERVICE 79 | drop: 80 | - ALL 
81 | runAsUser: 101 # www-data 82 | restartPolicy: Always 83 | terminationGracePeriodSeconds: 300 84 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | - apiGroups: 63 | - discovery.k8s.io 64 | resources: 65 | - "endpointslices" 66 | verbs: 67 | - get 68 | - list 69 | - watch 70 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | - endpoints 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - configmaps 20 | resourceNames: 21 | # Defaults to "<election-id>-<ingress-class>" 22 | # Here: "<ingress-controller-leader>-<nginx>" 23 | # This has to be adapted if you change either parameter 24 | # when launching the nginx-ingress-controller. 
25 | - "ingress-controller-leader-public" 26 | verbs: 27 | - get 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - configmaps 33 | verbs: 34 | - create 35 | - apiGroups: 36 | - "coordination.k8s.io" 37 | resources: 38 | - leases 39 | verbs: 40 | - create 41 | - get 42 | - update 43 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/public 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: nginx-ingress-controller 26 | image: registry.k8s.io/ingress-nginx/controller:v1.5.1 27 | args: 28 | - /nginx-ingress-controller 29 | - --controller-class=k8s.io/public 30 | - --ingress-class=public 31 | # use downward API 32 | env: 33 | - name: POD_NAME 34 | valueFrom: 35 | fieldRef: 36 | fieldPath: metadata.name 37 | - name: POD_NAMESPACE 38 | valueFrom: 39 | fieldRef: 40 | fieldPath: metadata.namespace 41 | ports: 42 | - name: http 43 | containerPort: 80 44 | hostPort: 80 45 | - name: https 46 | containerPort: 443 47 | hostPort: 443 48 | - name: health 49 | containerPort: 10254 50 | hostPort: 10254 51 | livenessProbe: 52 | httpGet: 53 | path: /healthz 54 | port: 10254 55 | scheme: HTTP 56 | initialDelaySeconds: 10 57 | periodSeconds: 10 58 | successThreshold: 1 59 | failureThreshold: 3 60 | timeoutSeconds: 5 61 | readinessProbe: 62 | httpGet: 63 | path: /healthz 64 | port: 10254 65 | scheme: HTTP 66 | periodSeconds: 10 67 | successThreshold: 1 68 | failureThreshold: 3 69 | timeoutSeconds: 5 70 | lifecycle: 71 | preStop: 72 | exec: 73 | command: 74 | - /wait-shutdown 75 | securityContext: 76 | capabilities: 77 | add: 78 | - NET_BIND_SERVICE 79 | drop: 80 | - ALL 81 | runAsUser: 101 # www-data 82 | 
restartPolicy: Always 83 | terminationGracePeriodSeconds: 300 84 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | - apiGroups: 63 | - discovery.k8s.io 64 | resources: 65 | - "endpointslices" 66 | verbs: 67 | - get 68 | - list 69 | - watch 70 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | - endpoints 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - configmaps 20 | resourceNames: 21 | # Defaults to "<election-id>-<ingress-class>" 22 | # Here: "<ingress-controller-leader>-<public>" 23 | # This has to be adapted if you change either parameter 24 | # when launching the nginx-ingress-controller.
25 | - "ingress-controller-leader-public" 26 | verbs: 27 | - get 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - configmaps 33 | verbs: 34 | - create 35 | - apiGroups: 36 | - "coordination.k8s.io" 37 | resources: 38 | - leases 39 | verbs: 40 | - create 41 | - get 42 | - update 43 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/prometheus/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | labels: 6 | name: monitoring 7 | -------------------------------------------------------------------------------- /addons/prometheus/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: prometheus 11 | phase: prod 12 | template: 13 | metadata: 14 | labels: 15 | name: prometheus 16 | phase: prod 17 | spec: 18 | securityContext: 19 | seccompProfile: 20 | type: RuntimeDefault 21 | serviceAccountName: prometheus 22 | containers: 23 | - name: prometheus 24 | image: quay.io/prometheus/prometheus:v2.40.5 25 | args: 26 | - --web.listen-address=0.0.0.0:9090 27 | - --config.file=/etc/prometheus/prometheus.yaml 28 | - --storage.tsdb.path=/var/lib/prometheus 29 | ports: 30 | - name: web 31 | containerPort: 9090 32 | resources: 33 | requests: 34 | cpu: 100m 35 | memory: 200Mi 36 | volumeMounts: 37 | - name: config 38 | mountPath: /etc/prometheus 39 | - name: rules 40 | mountPath: /etc/prometheus/rules 41 | - name: data 42 | mountPath: /var/lib/prometheus 43 | livenessProbe: 44 | httpGet: 45 | path: /-/healthy 46 | port: 9090 47 | initialDelaySeconds: 10 48 | timeoutSeconds: 10 49 | readinessProbe: 50 | httpGet: 51 | path: /-/ready 52 | port: 9090 53 | initialDelaySeconds: 10 54 | timeoutSeconds: 10 55 | terminationGracePeriodSeconds: 30 56 | volumes: 57 | - name: config 58 | configMap: 59 | name: prometheus-config 60 | - name: rules 61 | configMap: 62 | name: prometheus-rules 63 | - name: data 64 | emptyDir: {} 65 | -------------------------------------------------------------------------------- /addons/prometheus/discovery/kube-controller-manager.yaml: -------------------------------------------------------------------------------- 1 | # Allow Prometheus to discover service endpoints 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-controller-manager 6 | namespace: kube-system 7 | spec: 8 | type: ClusterIP 9 | clusterIP: None 10 | selector: 11 | k8s-app: kube-controller-manager 12 | ports: 13 | - name: metrics 14 | protocol: TCP 15 | port: 10257 16 | targetPort: 10257 17 | -------------------------------------------------------------------------------- 
/addons/prometheus/discovery/kube-proxy.yaml: -------------------------------------------------------------------------------- 1 | # Allow Prometheus to scrape service endpoints 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-proxy 6 | namespace: kube-system 7 | annotations: 8 | prometheus.io/scrape: 'true' 9 | prometheus.io/port: '10249' 10 | spec: 11 | type: ClusterIP 12 | clusterIP: None 13 | selector: 14 | k8s-app: kube-proxy 15 | ports: 16 | - name: metrics 17 | protocol: TCP 18 | port: 10249 19 | targetPort: 10249 20 | -------------------------------------------------------------------------------- /addons/prometheus/discovery/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | # Allow Prometheus to discover service endpoints 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-scheduler 6 | namespace: kube-system 7 | spec: 8 | type: ClusterIP 9 | clusterIP: None 10 | selector: 11 | k8s-app: kube-scheduler 12 | ports: 13 | - name: metrics 14 | protocol: TCP 15 | port: 10259 16 | targetPort: 10259 17 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kube-state-metrics 9 | subjects: 10 | - kind: ServiceAccount 11 | name: kube-state-metrics 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kube-state-metrics 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | - nodes 12 | - pods 13 | - services 14 | - resourcequotas 15 | - replicationcontrollers 16 | - limitranges 17 | - persistentvolumeclaims 18 | - persistentvolumes 19 | - namespaces 20 | - endpoints 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - extensions 26 | resources: 27 | - daemonsets 28 | - deployments 29 | - replicasets 30 | - ingresses 31 | verbs: 32 | - list 33 | - watch 34 | - apiGroups: 35 | - apps 36 | resources: 37 | - statefulsets 38 | - daemonsets 39 | - deployments 40 | - replicasets 41 | verbs: 42 | - list 43 | - watch 44 | - apiGroups: 45 | - batch 46 | resources: 47 | - cronjobs 48 | - jobs 49 | verbs: 50 | - list 51 | - watch 52 | - apiGroups: 53 | - autoscaling 54 | resources: 55 | - horizontalpodautoscalers 56 | verbs: 57 | - list 58 | - watch 59 | - apiGroups: 60 | - policy 61 | resources: 62 | - poddisruptionbudgets 63 | verbs: 64 | - list 65 | - watch 66 | - apiGroups: 67 | - certificates.k8s.io 68 | resources: 69 | - certificatesigningrequests 70 | verbs: 71 | - list 72 | - watch 73 | - apiGroups: 74 | - storage.k8s.io 75 | resources: 76 | - storageclasses 77 | - volumeattachments 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - admissionregistration.k8s.io 83 | resources: 84 | - mutatingwebhookconfigurations 85 | - validatingwebhookconfigurations 86 | verbs: 87 | - list 88 | - watch 89 | - apiGroups: 90 | - networking.k8s.io 91 | resources: 92 | - networkpolicies 93 | - ingresses 
94 | verbs: 95 | - list 96 | - watch 97 | - apiGroups: 98 | - coordination.k8s.io 99 | resources: 100 | - leases 101 | verbs: 102 | - list 103 | - watch 104 | 105 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: RollingUpdate 10 | rollingUpdate: 11 | maxUnavailable: 1 12 | selector: 13 | matchLabels: 14 | name: kube-state-metrics 15 | phase: prod 16 | template: 17 | metadata: 18 | labels: 19 | name: kube-state-metrics 20 | phase: prod 21 | spec: 22 | securityContext: 23 | seccompProfile: 24 | type: RuntimeDefault 25 | serviceAccountName: kube-state-metrics 26 | containers: 27 | - name: kube-state-metrics 28 | image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.7.0 29 | ports: 30 | - name: metrics 31 | containerPort: 8080 32 | - name: telemetry 33 | containerPort: 8081 34 | livenessProbe: 35 | httpGet: 36 | path: /healthz 37 | port: 8080 38 | initialDelaySeconds: 5 39 | timeoutSeconds: 5 40 | readinessProbe: 41 | httpGet: 42 | path: / 43 | port: 8081 44 | initialDelaySeconds: 5 45 | timeoutSeconds: 5 46 | securityContext: 47 | runAsUser: 65534 48 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | spec: 9 | type: ClusterIP 10 | # service is created to allow prometheus to scrape endpoints 11 | clusterIP: None 12 | selector: 13 | name: kube-state-metrics 14 | phase: prod 15 | ports: 16 | - name: metrics 17 | protocol: TCP 18 | port: 8080 19 | targetPort: 8080 20 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/node-exporter/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | spec: 7 | updateStrategy: 8 | type: RollingUpdate 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: node-exporter 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: node-exporter 19 | phase: prod 20 | spec: 21 | serviceAccountName: node-exporter 22 | securityContext: 23 | runAsNonRoot: true 24 | runAsUser: 65534 25 | runAsGroup: 65534 26 | fsGroup: 65534 27 | seccompProfile: 28 | type: RuntimeDefault 29 | hostNetwork: true 30 | hostPID: true 31 | containers: 32 | - name: node-exporter 33 | image: quay.io/prometheus/node-exporter:v1.5.0 34 | args: 35 | - --path.procfs=/host/proc 36 | - --path.sysfs=/host/sys 37 | - --path.rootfs=/host/root 38 | ports: 39 | - name: metrics 40 | containerPort: 9100 41 | hostPort: 9100 42 | resources: 43
| requests: 44 | cpu: 100m 45 | memory: 50Mi 46 | limits: 47 | cpu: 200m 48 | memory: 100Mi 49 | securityContext: 50 | seLinuxOptions: 51 | type: spc_t 52 | volumeMounts: 53 | - name: proc 54 | mountPath: /host/proc 55 | readOnly: true 56 | - name: sys 57 | mountPath: /host/sys 58 | readOnly: true 59 | - name: root 60 | mountPath: /host/root 61 | mountPropagation: HostToContainer 62 | readOnly: true 63 | tolerations: 64 | - key: node-role.kubernetes.io/controller 65 | operator: Exists 66 | - key: node-role.kubernetes.io/control-plane 67 | operator: Exists 68 | - key: node.kubernetes.io/not-ready 69 | operator: Exists 70 | volumes: 71 | - name: proc 72 | hostPath: 73 | path: /proc 74 | - name: sys 75 | hostPath: 76 | path: /sys 77 | - name: root 78 | hostPath: 79 | path: / 80 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/node-exporter/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/node-exporter/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | spec: 9 | type: ClusterIP 10 | # service is created to allow prometheus to scrape endpoints 11 | clusterIP: None 12 | selector: 13 | name: node-exporter 14 | phase: prod 15 | ports: 16 | - name: metrics 17 | protocol: TCP 18 | port: 80 19 | targetPort: 9100 20 | -------------------------------------------------------------------------------- /addons/prometheus/network-policy.yaml: -------------------------------------------------------------------------------- 1 | # Allow Grafana access and in-cluster Prometheus scraping 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: prometheus 6 | namespace: monitoring 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: prometheus 11 | ingress: 12 | - ports: 13 | - protocol: TCP 14 | port: 9090 15 | from: 16 | - namespaceSelector: 17 | matchLabels: 18 | name: monitoring 19 | podSelector: 20 | matchLabels: 21 | name: grafana 22 | - namespaceSelector: 23 | matchLabels: 24 | name: monitoring 25 | podSelector: 26 | matchLabels: 27 | name: prometheus 28 | 29 | -------------------------------------------------------------------------------- /addons/prometheus/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/prometheus/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - nodes 9 | - nodes/metrics 10 | - services 11 | - endpoints 12 | - pods 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -
nonResourceURLs: ["/metrics"] 18 | verbs: ["get"] 19 | - apiGroups: 20 | - networking.k8s.io 21 | resources: 22 | - ingresses 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | -------------------------------------------------------------------------------- /addons/prometheus/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/prometheus/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '9090' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: prometheus 13 | phase: prod 14 | ports: 15 | - name: web 16 | protocol: TCP 17 | port: 80 18 | targetPort: 9090 19 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 
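Clusters are defined by instantiating this directory as a Terraform module. A minimal sketch, with placeholder values (see the docs linked below for the full set of variables):

    module "tempest" {
      source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.33.1"

      # AWS (placeholder values)
      cluster_name = "tempest"
      dns_zone     = "aws.example.com"
      dns_zone_id  = "Z3PAABBCFAKE"

      # configuration
      ssh_authorized_key = "ssh-ed25519 AAAA..."

      # optional
      worker_count = 2
    }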
11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/fedora-coreos/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/fedora-coreos/aws/). 23 | 24 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/ami.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "fedora-coreos" { 2 | most_recent = true 3 | owners = ["125523088429"] 4 | 5 | filter { 6 | name = "architecture" 7 | values = ["x86_64"] 8 | } 9 | 10 | filter { 11 | name = "virtualization-type" 12 | values = ["hvm"] 13 | } 14 | 15 | filter { 16 | name = "description" 17 | values = ["Fedora CoreOS ${var.os_stream} *"] 18 | } 19 | } 20 | 21 | data "aws_ami" "fedora-coreos-arm" { 22 | count = var.controller_arch == "arm64" ? 1 : 0 23 | 24 | most_recent = true 25 | owners = ["125523088429"] 26 | 27 | filter { 28 | name = "architecture" 29 | values = ["arm64"] 30 | } 31 | 32 | filter { 33 | name = "virtualization-type" 34 | values = ["hvm"] 35 | } 36 | 37 | filter { 38 | name = "description" 39 | values = ["Fedora CoreOS ${var.os_stream} *"] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | service_account_issuer = var.service_account_issuer 8 | etcd_servers = aws_route53_record.etcds.*.fqdn 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | daemonset_tolerations = var.daemonset_tolerations 13 | components = var.components 14 | } 15 | 16 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/controllers.tf: -------------------------------------------------------------------------------- 1 | # Discrete DNS records for each controller's private IPv4 for etcd usage 2 | resource "aws_route53_record" "etcds" { 3 | count = var.controller_count 4 | 5 | # DNS Zone where record should be created 6 | zone_id = var.dns_zone_id 7 | 8 | name = format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone) 9 | type = "A" 10 | ttl = 300 11 | 12 | # private IPv4 address for etcd 13 | records = [aws_instance.controllers.*.private_ip[count.index]] 14 | } 15 | 16 | # Controller instances 17 | resource "aws_instance" "controllers" { 18 | count = var.controller_count 19 | 20 | tags = { 21 | 
Name = "${var.cluster_name}-controller-${count.index}" 22 | } 23 | instance_type = var.controller_type 24 | ami = var.controller_arch == "arm64" ? data.aws_ami.fedora-coreos-arm[0].image_id : data.aws_ami.fedora-coreos.image_id 25 | 26 | # storage 27 | root_block_device { 28 | volume_type = var.controller_disk_type 29 | volume_size = var.controller_disk_size 30 | iops = var.controller_disk_iops 31 | encrypted = true 32 | tags = { 33 | Name = "${var.cluster_name}-controller-${count.index}" 34 | } 35 | } 36 | 37 | # network 38 | associate_public_ip_address = true 39 | subnet_id = element(aws_subnet.public.*.id, count.index) 40 | vpc_security_group_ids = [aws_security_group.controller.id] 41 | 42 | # boot 43 | user_data = data.ct_config.controllers.*.rendered[count.index] 44 | 45 | # cost 46 | credit_specification { 47 | cpu_credits = var.controller_cpu_credits 48 | } 49 | 50 | lifecycle { 51 | ignore_changes = [ 52 | ami, 53 | user_data, 54 | ] 55 | } 56 | } 57 | 58 | # Fedora CoreOS controllers 59 | data "ct_config" "controllers" { 60 | count = var.controller_count 61 | content = templatefile("${path.module}/butane/controller.yaml", { 62 | # Cannot use cyclic dependencies on controllers or their DNS records 63 | etcd_name = "etcd${count.index}" 64 | etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" 65 | # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,... 66 | etcd_initial_cluster = join(",", [ 67 | for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380" 68 | ]) 69 | kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet) 70 | ssh_authorized_key = var.ssh_authorized_key 71 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 72 | }) 73 | strict = true 74 | snippets = var.controller_snippets 75 | } 76 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/network.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "all" { 2 | } 3 | 4 | # Network VPC, gateway, and routes 5 | 6 | resource "aws_vpc" "network" { 7 | cidr_block = var.host_cidr 8 | assign_generated_ipv6_cidr_block = true 9 | enable_dns_support = true 10 | enable_dns_hostnames = true 11 | 12 | tags = { 13 | "Name" = var.cluster_name 14 | } 15 | } 16 | 17 | resource "aws_internet_gateway" "gateway" { 18 | vpc_id = aws_vpc.network.id 19 | 20 | tags = { 21 | "Name" = var.cluster_name 22 | } 23 | } 24 | 25 | resource "aws_route_table" "default" { 26 | vpc_id = aws_vpc.network.id 27 | 28 | tags = { 29 | "Name" = var.cluster_name 30 | } 31 | } 32 | 33 | resource "aws_route" "egress-ipv4" { 34 | route_table_id = aws_route_table.default.id 35 | destination_cidr_block = "0.0.0.0/0" 36 | gateway_id = aws_internet_gateway.gateway.id 37 | } 38 | 39 | resource "aws_route" "egress-ipv6" { 40 | route_table_id = aws_route_table.default.id 41 | destination_ipv6_cidr_block = "::/0" 42 | gateway_id = aws_internet_gateway.gateway.id 43 | } 44 | 45 | # Subnets (one per availability zone) 46 | 47 | resource "aws_subnet" "public" { 48 | count = length(data.aws_availability_zones.all.names) 49 | 50 | tags = { 51 | "Name" = "${var.cluster_name}-public-${count.index}" 52 | } 53 | vpc_id = aws_vpc.network.id 54 | availability_zone = data.aws_availability_zones.all.names[count.index] 55 | 56 | # IPv4 and IPv6 CIDR blocks 57 | cidr_block = cidrsubnet(var.host_cidr, 4, count.index) 58 | ipv6_cidr_block = 
cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index) 59 | 60 | # Assign IPv4 and IPv6 addresses to instances 61 | map_public_ip_on_launch = true 62 | assign_ipv6_address_on_creation = true 63 | 64 | # Hostnames assigned to instances 65 | # resource-name: .region.compute.internal 66 | private_dns_hostname_type_on_launch = "resource-name" 67 | enable_resource_name_dns_a_record_on_launch = true 68 | enable_resource_name_dns_aaaa_record_on_launch = true 69 | } 70 | 71 | resource "aws_route_table_association" "public" { 72 | count = length(data.aws_availability_zones.all.names) 73 | 74 | route_table_id = aws_route_table.default.id 75 | subnet_id = aws_subnet.public.*.id[count.index] 76 | } 77 | 78 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/nlb.tf: -------------------------------------------------------------------------------- 1 | # Network Load Balancer DNS Record 2 | resource "aws_route53_record" "apiserver" { 3 | zone_id = var.dns_zone_id 4 | 5 | name = format("%s.%s.", var.cluster_name, var.dns_zone) 6 | type = "A" 7 | 8 | # AWS recommends their special "alias" records for NLBs 9 | alias { 10 | name = aws_lb.nlb.dns_name 11 | zone_id = aws_lb.nlb.zone_id 12 | evaluate_target_health = true 13 | } 14 | } 15 | 16 | # Network Load Balancer for apiservers and ingress 17 | resource "aws_lb" "nlb" { 18 | name = "${var.cluster_name}-nlb" 19 | load_balancer_type = "network" 20 | ip_address_type = "dualstack" 21 | internal = false 22 | 23 | subnets = aws_subnet.public.*.id 24 | 25 | enable_cross_zone_load_balancing = true 26 | } 27 | 28 | # Forward TCP apiserver traffic to controllers 29 | resource "aws_lb_listener" "apiserver-https" { 30 | load_balancer_arn = aws_lb.nlb.arn 31 | protocol = "TCP" 32 | port = "6443" 33 | 34 | default_action { 35 | type = "forward" 36 | target_group_arn = aws_lb_target_group.controllers.arn 37 | } 38 | } 39 | 40 | # Forward HTTP ingress traffic to workers 41 | resource "aws_lb_listener" "ingress-http" { 42 | load_balancer_arn = aws_lb.nlb.arn 43 | protocol = "TCP" 44 | port = 80 45 | 46 | default_action { 47 | type = "forward" 48 | target_group_arn = module.workers.target_group_http 49 | } 50 | } 51 | 52 | # Forward HTTPS ingress traffic to workers 53 | resource "aws_lb_listener" "ingress-https" { 54 | load_balancer_arn = aws_lb.nlb.arn 55 | protocol = "TCP" 56 | port = 443 57 | 58 | default_action { 59 | type = "forward" 60 | target_group_arn = module.workers.target_group_https 61 | } 62 | } 63 | 64 | # Target group of controllers 65 | resource "aws_lb_target_group" "controllers" { 66 | name = "${var.cluster_name}-controllers" 67 | vpc_id = aws_vpc.network.id 68 | target_type = "instance" 69 | 70 | protocol = "TCP" 71 | port = 6443 72 | 73 | # TCP health check for apiserver 74 | health_check { 75 | protocol = "TCP" 76 | port = 6443 77 | 78 | # NLBs required to use same healthy and unhealthy thresholds 79 | healthy_threshold = 3 80 | unhealthy_threshold = 3 81 | 82 | # Interval between health checks required to be 10 or 30 83 | interval = 10 84 | } 85 | } 86 | 87 | # Attach controller instances to apiserver NLB 88 | resource "aws_lb_target_group_attachment" "controllers" { 89 | count = var.controller_count 90 | 91 | target_group_arn = aws_lb_target_group.controllers.arn 92 | target_id = aws_instance.controllers.*.id[count.index] 93 | port = 6443 94 | } 95 | 96 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "ingress_dns_name" { 9 | value = aws_lb.nlb.dns_name 10 | description = "DNS name of the network load balancer for distributing traffic to Ingress controllers" 11 | } 12 | 13 | output "ingress_zone_id" { 14 | value = aws_lb.nlb.zone_id 15 | description = "Route53 zone id of the network load balancer DNS name that can be used in Route53 alias records" 16 | } 17 | 18 | # Outputs for worker pools 19 | 20 | output "vpc_id" { 21 | value = aws_vpc.network.id 22 | description = "ID of the VPC for creating worker instances" 23 | } 24 | 25 | output "subnet_ids" { 26 | value = aws_subnet.public.*.id 27 | description = "List of subnet IDs for creating worker instances" 28 | } 29 | 30 | output "worker_security_groups" { 31 | value = [aws_security_group.worker.id] 32 | description = "List of worker security group IDs" 33 | } 34 | 35 | output "kubeconfig" { 36 | value = module.bootstrap.kubeconfig-kubelet 37 | sensitive = true 38 | } 39 | 40 | # Outputs for custom load balancing 41 | 42 | output "nlb_id" { 43 | description = "ARN of the Network Load Balancer" 44 | value = aws_lb.nlb.id 45 | } 46 | 47 | output "worker_target_group_http" { 48 | description = "ARN of a target group of workers for HTTP traffic" 49 | value = module.workers.target_group_http 50 | } 51 | 52 | output "worker_target_group_https" { 53 | description = "ARN of a target group of workers for HTTPS traffic" 54 | value = module.workers.target_group_https 55 | } 56 | 57 | # Outputs for debug 58 | 59 | output "assets_dist" { 60 | value = module.bootstrap.assets_dist 61 | sensitive = true 62 | } 63 | 64 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = aws_instance.controllers.*.public_ip[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "/home/core/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 
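# ("bootstrap" refers to a oneshot systemd unit expected to be provisioned via
# butane/controller.yaml; starting it once applies the copied assets, so this
# provisioner does not need to re-run once the cluster is healthy.)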
38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | aws_route53_record.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = aws_instance.controllers[0].public_ip 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | aws = ">= 2.23, <= 6.0" 7 | null = ">= 2.1" 8 | ct = { 9 | source = "poseidon/ct" 10 | version = "~> 0.13" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # AWS 6 | vpc_id = aws_vpc.network.id 7 | subnet_ids = aws_subnet.public.*.id 8 | security_groups = [aws_security_group.worker.id] 9 | 10 | # instances 11 | os_stream = var.os_stream 12 | worker_count = var.worker_count 13 | instance_type = var.worker_type 14 | arch = var.worker_arch 15 | disk_type = var.worker_disk_type 16 | disk_size = var.worker_disk_size 17 | disk_iops = var.worker_disk_iops 18 | cpu_credits = var.worker_cpu_credits 19 | spot_price = var.worker_price 20 | target_groups = var.worker_target_groups 21 | 22 | # configuration 23 | kubeconfig = module.bootstrap.kubeconfig-kubelet 24 | ssh_authorized_key = var.ssh_authorized_key 25 | service_cidr = var.service_cidr 26 | snippets = var.worker_snippets 27 | node_labels = var.worker_node_labels 28 | } 29 | 30 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/ami.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | ami_id = var.arch == "arm64" ? data.aws_ami.fedora-coreos-arm[0].image_id : data.aws_ami.fedora-coreos.image_id 3 | } 4 | 5 | data "aws_ami" "fedora-coreos" { 6 | most_recent = true 7 | owners = ["125523088429"] 8 | 9 | filter { 10 | name = "architecture" 11 | values = ["x86_64"] 12 | } 13 | 14 | filter { 15 | name = "virtualization-type" 16 | values = ["hvm"] 17 | } 18 | 19 | filter { 20 | name = "description" 21 | values = ["Fedora CoreOS ${var.os_stream} *"] 22 | } 23 | } 24 | 25 | data "aws_ami" "fedora-coreos-arm" { 26 | count = var.arch == "arm64" ? 
1 : 0 27 | 28 | most_recent = true 29 | owners = ["125523088429"] 30 | 31 | filter { 32 | name = "architecture" 33 | values = ["arm64"] 34 | } 35 | 36 | filter { 37 | name = "virtualization-type" 38 | values = ["hvm"] 39 | } 40 | 41 | filter { 42 | name = "description" 43 | values = ["Fedora CoreOS ${var.os_stream} *"] 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/ingress.tf: -------------------------------------------------------------------------------- 1 | # Target groups of instances for use with load balancers 2 | 3 | resource "aws_lb_target_group" "workers-http" { 4 | name = "${var.name}-workers-http" 5 | vpc_id = var.vpc_id 6 | target_type = "instance" 7 | 8 | protocol = "TCP" 9 | port = 80 10 | 11 | # HTTP health check for ingress 12 | health_check { 13 | protocol = "HTTP" 14 | port = 10254 15 | path = "/healthz" 16 | 17 | # NLBs required to use same healthy and unhealthy thresholds 18 | healthy_threshold = 3 19 | unhealthy_threshold = 3 20 | 21 | # Interval between health checks required to be 10 or 30 22 | interval = 10 23 | } 24 | } 25 | 26 | resource "aws_lb_target_group" "workers-https" { 27 | name = "${var.name}-workers-https" 28 | vpc_id = var.vpc_id 29 | target_type = "instance" 30 | 31 | protocol = "TCP" 32 | port = 443 33 | 34 | # HTTP health check for ingress 35 | health_check { 36 | protocol = "HTTP" 37 | port = 10254 38 | path = "/healthz" 39 | 40 | # NLBs required to use same healthy and unhealthy thresholds 41 | healthy_threshold = 3 42 | unhealthy_threshold = 3 43 | 44 | # Interval between health checks required to be 10 or 30 45 | interval = 10 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | output "target_group_http" { 2 | description = "ARN of a target group of workers for HTTP traffic" 3 | value = aws_lb_target_group.workers-http.arn 4 | } 5 | 6 | output "target_group_https" { 7 | description = "ARN of a target group of workers for HTTPS traffic" 8 | value = aws_lb_target_group.workers-https.arn 9 | } 10 | 11 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | aws = ">= 2.23, <= 6.0" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice 
shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/flatcar-linux/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/flatcar-linux/aws/). 23 | 24 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/ami.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Pick a Flatcar Linux AMI 3 | # flatcar-stable -> Flatcar Linux AMI 4 | ami_id = var.controller_arch == "arm64" ? data.aws_ami.flatcar-arm64[0].image_id : data.aws_ami.flatcar.image_id 5 | channel = split("-", var.os_image)[1] 6 | } 7 | 8 | data "aws_ami" "flatcar" { 9 | most_recent = true 10 | owners = ["075585003325"] 11 | 12 | filter { 13 | name = "architecture" 14 | values = ["x86_64"] 15 | } 16 | 17 | filter { 18 | name = "virtualization-type" 19 | values = ["hvm"] 20 | } 21 | 22 | filter { 23 | name = "name" 24 | values = ["Flatcar-${local.channel}-*"] 25 | } 26 | } 27 | 28 | data "aws_ami" "flatcar-arm64" { 29 | count = var.controller_arch == "arm64" ? 
1 : 0 30 | 31 | most_recent = true 32 | owners = ["075585003325"] 33 | 34 | filter { 35 | name = "architecture" 36 | values = ["arm64"] 37 | } 38 | 39 | filter { 40 | name = "virtualization-type" 41 | values = ["hvm"] 42 | } 43 | 44 | filter { 45 | name = "name" 46 | values = ["Flatcar-${local.channel}-*"] 47 | } 48 | } 49 | 50 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | service_account_issuer = var.service_account_issuer 8 | etcd_servers = aws_route53_record.etcds.*.fqdn 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | daemonset_tolerations = var.daemonset_tolerations 13 | components = var.components 14 | } 15 | 16 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/controllers.tf: -------------------------------------------------------------------------------- 1 | # Discrete DNS records for each controller's private IPv4 for etcd usage 2 | resource "aws_route53_record" "etcds" { 3 | count = var.controller_count 4 | 5 | # DNS Zone where record should be created 6 | zone_id = var.dns_zone_id 7 | 8 | name = format("%s-etcd%d.%s.", var.cluster_name, count.index, var.dns_zone) 9 | type = "A" 10 | ttl = 300 11 | 12 | # private IPv4 address for etcd 13 | records = [aws_instance.controllers.*.private_ip[count.index]] 14 | } 15 | 16 | # Controller instances 17 | resource "aws_instance" "controllers" { 18 | count = var.controller_count 19 | 20 | tags = { 21 | Name = "${var.cluster_name}-controller-${count.index}" 22 | } 23 | instance_type = var.controller_type 24 | ami = local.ami_id 25 | 26 | # storage 27 | root_block_device { 28 | volume_type = var.controller_disk_type 29 | volume_size = var.controller_disk_size 30 | iops = var.controller_disk_iops 31 | encrypted = true 32 | tags = { 33 | Name = "${var.cluster_name}-controller-${count.index}" 34 | } 35 | } 36 | 37 | # network 38 | associate_public_ip_address = true 39 | subnet_id = element(aws_subnet.public.*.id, count.index) 40 | vpc_security_group_ids = [aws_security_group.controller.id] 41 | 42 | # boot 43 | user_data = data.ct_config.controllers.*.rendered[count.index] 44 | 45 | # cost 46 | credit_specification { 47 | cpu_credits = var.controller_cpu_credits 48 | } 49 | 50 | lifecycle { 51 | ignore_changes = [ 52 | ami, 53 | user_data, 54 | ] 55 | } 56 | } 57 | 58 | # Flatcar Linux controllers 59 | data "ct_config" "controllers" { 60 | count = var.controller_count 61 | content = templatefile("${path.module}/butane/controller.yaml", { 62 | # Cannot use cyclic dependencies on controllers or their DNS records 63 | etcd_name = "etcd${count.index}" 64 | etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" 65 | # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,... 
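# For example, with cluster_name = "demo", dns_zone = "example.com", and
# controller_count = 2 (placeholder values), the join below renders:
#   etcd0=https://demo-etcd0.example.com:2380,etcd1=https://demo-etcd1.example.com:2380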
66 | etcd_initial_cluster = join(",", [ 67 | for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380" 68 | ]) 69 | kubeconfig = indent(10, module.bootstrap.kubeconfig-kubelet) 70 | ssh_authorized_key = var.ssh_authorized_key 71 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 72 | }) 73 | strict = true 74 | snippets = var.controller_snippets 75 | } 76 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/network.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "all" { 2 | } 3 | 4 | # Network VPC, gateway, and routes 5 | 6 | resource "aws_vpc" "network" { 7 | cidr_block = var.host_cidr 8 | assign_generated_ipv6_cidr_block = true 9 | enable_dns_support = true 10 | enable_dns_hostnames = true 11 | 12 | tags = { 13 | "Name" = var.cluster_name 14 | } 15 | } 16 | 17 | resource "aws_internet_gateway" "gateway" { 18 | vpc_id = aws_vpc.network.id 19 | 20 | tags = { 21 | "Name" = var.cluster_name 22 | } 23 | } 24 | 25 | resource "aws_route_table" "default" { 26 | vpc_id = aws_vpc.network.id 27 | 28 | tags = { 29 | "Name" = var.cluster_name 30 | } 31 | } 32 | 33 | resource "aws_route" "egress-ipv4" { 34 | route_table_id = aws_route_table.default.id 35 | destination_cidr_block = "0.0.0.0/0" 36 | gateway_id = aws_internet_gateway.gateway.id 37 | } 38 | 39 | resource "aws_route" "egress-ipv6" { 40 | route_table_id = aws_route_table.default.id 41 | destination_ipv6_cidr_block = "::/0" 42 | gateway_id = aws_internet_gateway.gateway.id 43 | } 44 | 45 | # Subnets (one per availability zone) 46 | 47 | resource "aws_subnet" "public" { 48 | count = length(data.aws_availability_zones.all.names) 49 | 50 | tags = { 51 | "Name" = "${var.cluster_name}-public-${count.index}" 52 | } 53 | vpc_id = aws_vpc.network.id 54 | availability_zone = data.aws_availability_zones.all.names[count.index] 55 | 56 | # IPv4 and IPv6 CIDR blocks 57 | cidr_block = cidrsubnet(var.host_cidr, 4, count.index) 58 | ipv6_cidr_block = cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index) 59 | 60 | # Assign IPv4 and IPv6 addresses to instances 61 | map_public_ip_on_launch = true 62 | assign_ipv6_address_on_creation = true 63 | 64 | # Hostnames assigned to instances 65 | # resource-name: .region.compute.internal 66 | private_dns_hostname_type_on_launch = "resource-name" 67 | enable_resource_name_dns_a_record_on_launch = true 68 | enable_resource_name_dns_aaaa_record_on_launch = true 69 | } 70 | 71 | resource "aws_route_table_association" "public" { 72 | count = length(data.aws_availability_zones.all.names) 73 | 74 | route_table_id = aws_route_table.default.id 75 | subnet_id = aws_subnet.public.*.id[count.index] 76 | } 77 | 78 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "ingress_dns_name" { 9 | value = aws_lb.nlb.dns_name 10 | description = "DNS name of the network load balancer for distributing traffic to Ingress controllers" 11 | } 12 | 13 | output "ingress_zone_id" { 14 | value = aws_lb.nlb.zone_id 15 | description = "Route53 zone id of the network load balancer DNS name that can be used in Route53 alias records" 16 | } 17 | 18 | # 
Outputs for worker pools 19 | 20 | output "vpc_id" { 21 | value = aws_vpc.network.id 22 | description = "ID of the VPC for creating worker instances" 23 | } 24 | 25 | output "subnet_ids" { 26 | value = aws_subnet.public.*.id 27 | description = "List of subnet IDs for creating worker instances" 28 | } 29 | 30 | output "worker_security_groups" { 31 | value = [aws_security_group.worker.id] 32 | description = "List of worker security group IDs" 33 | } 34 | 35 | output "kubeconfig" { 36 | value = module.bootstrap.kubeconfig-kubelet 37 | sensitive = true 38 | } 39 | 40 | # Outputs for custom load balancing 41 | 42 | output "nlb_id" { 43 | description = "ARN of the Network Load Balancer" 44 | value = aws_lb.nlb.id 45 | } 46 | 47 | output "worker_target_group_http" { 48 | description = "ARN of a target group of workers for HTTP traffic" 49 | value = module.workers.target_group_http 50 | } 51 | 52 | output "worker_target_group_https" { 53 | description = "ARN of a target group of workers for HTTPS traffic" 54 | value = module.workers.target_group_https 55 | } 56 | 57 | # Outputs for debug 58 | 59 | output "assets_dist" { 60 | value = module.bootstrap.assets_dist 61 | sensitive = true 62 | } 63 | 64 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = aws_instance.controllers.*.public_ip[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "/home/core/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 
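# (The depends_on list below is what sequences the one-time bootstrap: it runs
# only after controller secrets are copied, workers are created, and the
# apiserver DNS record exists.)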
38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | aws_route53_record.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = aws_instance.controllers[0].public_ip 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | aws = ">= 2.23, <= 6.0" 7 | null = ">= 2.1" 8 | ct = { 9 | source = "poseidon/ct" 10 | version = "~> 0.13" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # AWS 6 | vpc_id = aws_vpc.network.id 7 | subnet_ids = aws_subnet.public.*.id 8 | security_groups = [aws_security_group.worker.id] 9 | 10 | # instances 11 | os_image = var.os_image 12 | worker_count = var.worker_count 13 | instance_type = var.worker_type 14 | arch = var.worker_arch 15 | disk_type = var.worker_disk_type 16 | disk_size = var.worker_disk_size 17 | disk_iops = var.worker_disk_iops 18 | spot_price = var.worker_price 19 | target_groups = var.worker_target_groups 20 | 21 | # configuration 22 | kubeconfig = module.bootstrap.kubeconfig-kubelet 23 | ssh_authorized_key = var.ssh_authorized_key 24 | service_cidr = var.service_cidr 25 | snippets = var.worker_snippets 26 | node_labels = var.worker_node_labels 27 | } 28 | 29 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/workers/ami.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Pick a Flatcar Linux AMI 3 | # flatcar-stable -> Flatcar Linux AMI 4 | ami_id = var.arch == "arm64" ? data.aws_ami.flatcar-arm64[0].image_id : data.aws_ami.flatcar.image_id 5 | channel = split("-", var.os_image)[1] 6 | } 7 | 8 | data "aws_ami" "flatcar" { 9 | most_recent = true 10 | owners = ["075585003325"] 11 | 12 | filter { 13 | name = "architecture" 14 | values = ["x86_64"] 15 | } 16 | 17 | filter { 18 | name = "virtualization-type" 19 | values = ["hvm"] 20 | } 21 | 22 | filter { 23 | name = "name" 24 | values = ["Flatcar-${local.channel}-*"] 25 | } 26 | } 27 | 28 | data "aws_ami" "flatcar-arm64" { 29 | count = var.arch == "arm64" ? 
1 : 0 30 | 31 | most_recent = true 32 | owners = ["075585003325"] 33 | 34 | filter { 35 | name = "architecture" 36 | values = ["arm64"] 37 | } 38 | 39 | filter { 40 | name = "virtualization-type" 41 | values = ["hvm"] 42 | } 43 | 44 | filter { 45 | name = "name" 46 | values = ["Flatcar-${local.channel}-*"] 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/workers/ingress.tf: -------------------------------------------------------------------------------- 1 | # Target groups of instances for use with load balancers 2 | 3 | resource "aws_lb_target_group" "workers-http" { 4 | name = "${var.name}-workers-http" 5 | vpc_id = var.vpc_id 6 | target_type = "instance" 7 | 8 | protocol = "TCP" 9 | port = 80 10 | 11 | # HTTP health check for ingress 12 | health_check { 13 | protocol = "HTTP" 14 | port = 10254 15 | path = "/healthz" 16 | 17 | # NLBs required to use same healthy and unhealthy thresholds 18 | healthy_threshold = 3 19 | unhealthy_threshold = 3 20 | 21 | # Interval between health checks required to be 10 or 30 22 | interval = 10 23 | } 24 | } 25 | 26 | resource "aws_lb_target_group" "workers-https" { 27 | name = "${var.name}-workers-https" 28 | vpc_id = var.vpc_id 29 | target_type = "instance" 30 | 31 | protocol = "TCP" 32 | port = 443 33 | 34 | # HTTP health check for ingress 35 | health_check { 36 | protocol = "HTTP" 37 | port = 10254 38 | path = "/healthz" 39 | 40 | # NLBs required to use same healthy and unhealthy thresholds 41 | healthy_threshold = 3 42 | unhealthy_threshold = 3 43 | 44 | # Interval between health checks required to be 10 or 30 45 | interval = 10 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | output "target_group_http" { 2 | description = "ARN of a target group of workers for HTTP traffic" 3 | value = aws_lb_target_group.workers-http.arn 4 | } 5 | 6 | output "target_group_https" { 7 | description = "ARN of a target group of workers for HTTPS traffic" 8 | value = aws_lb_target_group.workers-https.arn 9 | } 10 | 11 | -------------------------------------------------------------------------------- /aws/flatcar-linux/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | aws = ">= 2.23, <= 6.0" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Typhoon Authors 4 | Copyright (c) 2020 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 
included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot priority](https://typhoon.psdn.io/fedora-coreos/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Azure [tutorial](https://typhoon.psdn.io/fedora-coreos/azure/). 
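A minimal usage sketch follows, for orientation only. Values are illustrative placeholders: `location` and `dns_zone_group` are assumed variable names taken from the tutorial rather than from the files in this directory, and the `?ref=` pin follows Typhoon's convention of tagging releases after the Kubernetes version.

```tf
module "cluster" {
  source = "git::https://github.com/poseidon/typhoon//azure/fedora-coreos/kubernetes?ref=v1.33.1"

  # Azure
  cluster_name   = "example"
  location       = "centralus"         # assumed variable; see the tutorial
  dns_zone       = "azure.example.com"
  dns_zone_group = "example-group"     # assumed variable; see the tutorial

  # configuration
  ssh_authorized_key = "ssh-ed25519 AAAA..." # placeholder public key

  # optional
  worker_count = 2
}
```

After `terraform apply`, module outputs such as `kubeconfig-admin` (see `outputs.tf` below) provide cluster access.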
23 | 24 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone) 7 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 8 | 9 | service_account_issuer = var.service_account_issuer 10 | networking = var.networking 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | daemonset_tolerations = var.daemonset_tolerations 14 | components = var.components 15 | } 16 | 17 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | backend_address_pool_ids = { 3 | ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id] 4 | ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id] 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "ingress_static_ipv4" { 9 | value = azurerm_public_ip.frontend-ipv4.ip_address 10 | description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers" 11 | } 12 | 13 | output "ingress_static_ipv6" { 14 | value = azurerm_public_ip.frontend-ipv6.ip_address 15 | description = "IPv6 address of the load balancer for distributing traffic to Ingress controllers" 16 | } 17 | 18 | # Outputs for worker pools 19 | 20 | output "location" { 21 | value = azurerm_resource_group.cluster.location 22 | } 23 | 24 | output "resource_group_name" { 25 | value = azurerm_resource_group.cluster.name 26 | } 27 | 28 | output "resource_group_id" { 29 | value = azurerm_resource_group.cluster.id 30 | } 31 | 32 | output "subnet_id" { 33 | value = azurerm_subnet.worker.id 34 | } 35 | 36 | output "security_group_id" { 37 | value = azurerm_network_security_group.worker.id 38 | } 39 | 40 | output "kubeconfig" { 41 | value = module.bootstrap.kubeconfig-kubelet 42 | sensitive = true 43 | } 44 | 45 | # Outputs for custom firewalling 46 | 47 | output "controller_security_group_name" { 48 | description = "Network Security Group for controller nodes" 49 | value = azurerm_network_security_group.controller.name 50 | } 51 | 52 | output "worker_security_group_name" { 53 | description = "Network Security Group for worker nodes" 54 | value = azurerm_network_security_group.worker.name 55 | } 56 | 57 | output "controller_address_prefixes" { 58 | description = "Controller network subnet CIDR addresses (for source/destination)" 59 | value = local.controller_subnets 60 | } 61 | 62 | output "worker_address_prefixes" { 63 | description = "Worker network subnet CIDR addresses (for source/destination)" 64 | value = local.worker_subnets 65 | } 66 | 67 | # Outputs for custom load balancing 68 | 69 | output "loadbalancer_id" { 70 | description = "ID of the cluster load balancer" 71 | value = azurerm_lb.cluster.id 72 | } 
73 | 74 | output "backend_address_pool_ids" { 75 | description = "IDs of the worker backend address pools" 76 | value = { 77 | ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id] 78 | ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id] 79 | } 80 | } 81 | 82 | # Outputs for debug 83 | 84 | output "assets_dist" { 85 | value = module.bootstrap.assets_dist 86 | sensitive = true 87 | } 88 | 89 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | azurerm_linux_virtual_machine.controllers 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = azurerm_public_ip.controllers-ipv4[count.index].ip_address 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = join("\n", local.assets_bundle) 28 | destination = "/home/core/assets" 29 | } 30 | 31 | provisioner "remote-exec" { 32 | inline = [ 33 | "sudo /opt/bootstrap/layout", 34 | ] 35 | } 36 | } 37 | 38 | # Connect to a controller to perform one-time cluster bootstrap. 39 | resource "null_resource" "bootstrap" { 40 | depends_on = [ 41 | null_resource.copy-controller-secrets, 42 | module.workers, 43 | azurerm_dns_a_record.apiserver, 44 | ] 45 | 46 | connection { 47 | type = "ssh" 48 | host = azurerm_public_ip.controllers-ipv4[0].ip_address 49 | user = "core" 50 | timeout = "15m" 51 | } 52 | 53 | provisioner "remote-exec" { 54 | inline = [ 55 | "sudo systemctl start bootstrap", 56 | ] 57 | } 58 | } 59 | 60 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | azurerm = ">= 2.8" 7 | null = ">= 2.1" 8 | ct = { 9 | source = "poseidon/ct" 10 | version = "~> 0.13" 11 | } 12 | } 13 | } 14 | 15 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # Azure 6 | resource_group_name = azurerm_resource_group.cluster.name 7 | location = azurerm_resource_group.cluster.location 8 | subnet_id = azurerm_subnet.worker.id 9 | security_group_id = azurerm_network_security_group.worker.id 10 | backend_address_pool_ids = local.backend_address_pool_ids 11 | 12 | # instances 13 | os_image = var.os_image 14 | worker_count = var.worker_count 15 | vm_type = var.worker_type 16 | disk_type = var.worker_disk_type 17 | disk_size = var.worker_disk_size 18 | ephemeral_disk = var.worker_ephemeral_disk 19 | priority = var.worker_priority 20 | 21 | # configuration 22 | kubeconfig = module.bootstrap.kubeconfig-kubelet 23 | ssh_authorized_key = var.ssh_authorized_key 24 | azure_authorized_key = var.azure_authorized_key 25 | service_cidr = var.service_cidr 26 | snippets = 
var.worker_snippets 27 | node_labels = var.worker_node_labels 28 | } 29 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | azurerm = ">= 2.8" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 
11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/flatcar-linux/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Azure [tutorial](https://typhoon.psdn.io/flatcar-linux/azure/). 23 | 24 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone) 8 | 9 | service_account_issuer = var.service_account_issuer 10 | networking = var.networking 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | daemonset_tolerations = var.daemonset_tolerations 14 | components = var.components 15 | } 16 | 17 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | backend_address_pool_ids = { 3 | ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id] 4 | ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id] 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "ingress_static_ipv4" { 9 | value = azurerm_public_ip.frontend-ipv4.ip_address 10 | description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers" 11 | } 12 | 13 | output "ingress_static_ipv6" { 14 | value = azurerm_public_ip.frontend-ipv6.ip_address 15 | description = "IPv6 address of the load balancer for distributing traffic to Ingress controllers" 16 | } 17 | 18 | # Outputs for worker pools 19 | 20 | output "location" { 21 | value = azurerm_resource_group.cluster.location 22 | } 23 | 24 | output "resource_group_name" { 25 | value = azurerm_resource_group.cluster.name 26 | } 27 | 28 | output "resource_group_id" { 29 | value = azurerm_resource_group.cluster.id 30 | } 31 | 32 | output "subnet_id" { 33 | value = azurerm_subnet.worker.id 34 | } 35 | 36 | output "security_group_id" { 37 | value = azurerm_network_security_group.worker.id 38 | } 39 | 40 | output "kubeconfig" { 41 | value = module.bootstrap.kubeconfig-kubelet 42 | sensitive = true 43 | } 
44 | 45 | # Outputs for custom firewalling 46 | 47 | output "controller_security_group_name" { 48 | description = "Network Security Group for controller nodes" 49 | value = azurerm_network_security_group.controller.name 50 | } 51 | 52 | output "worker_security_group_name" { 53 | description = "Network Security Group for worker nodes" 54 | value = azurerm_network_security_group.worker.name 55 | } 56 | 57 | output "controller_address_prefixes" { 58 | description = "Controller network subnet CIDR addresses (for source/destination)" 59 | value = local.controller_subnets 60 | } 61 | 62 | output "worker_address_prefixes" { 63 | description = "Worker network subnet CIDR addresses (for source/destination)" 64 | value = local.worker_subnets 65 | } 66 | 67 | # Outputs for custom load balancing 68 | 69 | output "loadbalancer_id" { 70 | description = "ID of the cluster load balancer" 71 | value = azurerm_lb.cluster.id 72 | } 73 | 74 | output "backend_address_pool_ids" { 75 | description = "IDs of the worker backend address pools" 76 | value = { 77 | ipv4 = [azurerm_lb_backend_address_pool.worker-ipv4.id] 78 | ipv6 = [azurerm_lb_backend_address_pool.worker-ipv6.id] 79 | } 80 | } 81 | 82 | # Outputs for debug 83 | 84 | output "assets_dist" { 85 | value = module.bootstrap.assets_dist 86 | sensitive = true 87 | } 88 | 89 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | azurerm_linux_virtual_machine.controllers 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = azurerm_public_ip.controllers-ipv4[count.index].ip_address 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = join("\n", local.assets_bundle) 28 | destination = "/home/core/assets" 29 | } 30 | 31 | provisioner "remote-exec" { 32 | inline = [ 33 | "sudo /opt/bootstrap/layout", 34 | ] 35 | } 36 | } 37 | 38 | # Connect to a controller to perform one-time cluster bootstrap. 
39 | resource "null_resource" "bootstrap" { 40 | depends_on = [ 41 | null_resource.copy-controller-secrets, 42 | module.workers, 43 | azurerm_dns_a_record.apiserver, 44 | ] 45 | 46 | connection { 47 | type = "ssh" 48 | host = azurerm_public_ip.controllers-ipv4[0].ip_address 49 | user = "core" 50 | timeout = "15m" 51 | } 52 | 53 | provisioner "remote-exec" { 54 | inline = [ 55 | "sudo systemctl start bootstrap", 56 | ] 57 | } 58 | } 59 | 60 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | azurerm = ">= 2.8" 7 | null = ">= 2.1" 8 | ct = { 9 | source = "poseidon/ct" 10 | version = "~> 0.13" 11 | } 12 | } 13 | } 14 | 15 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # Azure 6 | resource_group_name = azurerm_resource_group.cluster.name 7 | location = azurerm_resource_group.cluster.location 8 | subnet_id = azurerm_subnet.worker.id 9 | security_group_id = azurerm_network_security_group.worker.id 10 | backend_address_pool_ids = local.backend_address_pool_ids 11 | 12 | worker_count = var.worker_count 13 | vm_type = var.worker_type 14 | os_image = var.os_image 15 | disk_type = var.worker_disk_type 16 | disk_size = var.worker_disk_size 17 | ephemeral_disk = var.worker_ephemeral_disk 18 | priority = var.worker_priority 19 | 20 | # configuration 21 | kubeconfig = module.bootstrap.kubeconfig-kubelet 22 | ssh_authorized_key = var.ssh_authorized_key 23 | azure_authorized_key = var.azure_authorized_key 24 | service_cidr = var.service_cidr 25 | snippets = var.worker_snippets 26 | node_labels = var.worker_node_labels 27 | arch = var.worker_arch 28 | } 29 | -------------------------------------------------------------------------------- /azure/flatcar-linux/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | azurerm = ">= 2.8" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/fedora-coreos/bare-metal/). 
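For orientation, a minimal usage sketch with placeholder values. The machine fields `name`, `mac`, and `domain` mirror those consumed in `workers.tf` below, and the `?ref=` pin follows Typhoon's release-tagging convention.

```tf
module "cluster" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/fedora-coreos/kubernetes?ref=v1.33.1"

  # bare-metal
  cluster_name           = "example"
  matchbox_http_endpoint = "http://matchbox.example.com:8080" # placeholder endpoint
  os_stream              = "stable"
  os_version             = "38.20230514.3.0" # placeholder Fedora CoreOS build

  # machines
  k8s_domain_name    = "node1.example.com"
  ssh_authorized_key = "ssh-ed25519 AAAA..." # placeholder public key
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae" # placeholder MAC
    domain = "node1.example.com"
  }]
  workers = [{
    name   = "node2"
    mac    = "52:54:00:b2:2f:86" # placeholder MAC
    domain = "node2.example.com"
  }]
}
```

The module also requires a configured `poseidon/matchbox` provider, per `versions.tf`.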
23 | 24 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [var.k8s_domain_name] 7 | service_account_issuer = var.service_account_issuer 8 | etcd_servers = var.controllers.*.domain 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | components = var.components 13 | } 14 | 15 | 16 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for workers 7 | 8 | output "kubeconfig" { 9 | value = module.bootstrap.kubeconfig-kubelet 10 | sensitive = true 11 | } 12 | 13 | # Outputs for debug 14 | 15 | output "assets_dist" { 16 | value = module.bootstrap.assets_dist 17 | sensitive = true 18 | } 19 | 20 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = length(var.controllers) 13 | 14 | # Without depends_on, remote-exec could start and wait for machines before 15 | # matchbox groups are written, causing a deadlock. 16 | depends_on = [ 17 | matchbox_group.controller, 18 | module.bootstrap, 19 | ] 20 | 21 | connection { 22 | type = "ssh" 23 | host = var.controllers.*.domain[count.index] 24 | user = "core" 25 | timeout = "60m" 26 | } 27 | 28 | provisioner "file" { 29 | content = module.bootstrap.kubeconfig-kubelet 30 | destination = "/home/core/kubeconfig" 31 | } 32 | 33 | provisioner "file" { 34 | content = join("\n", local.assets_bundle) 35 | destination = "/home/core/assets" 36 | } 37 | 38 | provisioner "remote-exec" { 39 | inline = [ 40 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 41 | "sudo touch /etc/kubernetes", 42 | "sudo /opt/bootstrap/layout", 43 | ] 44 | } 45 | } 46 | 47 | # Connect to a controller to perform one-time cluster bootstrap. 48 | resource "null_resource" "bootstrap" { 49 | # Without depends_on, this remote-exec may start before the kubeconfig copy. 50 | # Terraform only does one task at a time, so it would try to bootstrap 51 | # while no Kubelets are running. 
52 | depends_on = [ 53 | null_resource.copy-controller-secrets, 54 | ] 55 | 56 | connection { 57 | type = "ssh" 58 | host = var.controllers[0].domain 59 | user = "core" 60 | timeout = "15m" 61 | } 62 | 63 | provisioner "remote-exec" { 64 | inline = [ 65 | "sudo systemctl start bootstrap", 66 | ] 67 | } 68 | } 69 | 70 | 71 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | null = ">= 2.1" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | matchbox = { 12 | source = "poseidon/matchbox" 13 | version = "~> 0.5.0" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/worker/matchbox.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | remote_kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-kernel-x86_64" 3 | remote_initrd = [ 4 | "--name main https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-initramfs.x86_64.img", 5 | ] 6 | 7 | remote_args = [ 8 | "initrd=main", 9 | "coreos.live.rootfs_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-rootfs.x86_64.img", 10 | "coreos.inst.install_dev=${var.install_disk}", 11 | "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}", 12 | ] 13 | 14 | cached_kernel = "/assets/fedora-coreos/fedora-coreos-${var.os_version}-live-kernel-x86_64" 15 | cached_initrd = [ 16 | "/assets/fedora-coreos/fedora-coreos-${var.os_version}-live-initramfs.x86_64.img", 17 | ] 18 | 19 | cached_args = [ 20 | "initrd=main", 21 | "coreos.live.rootfs_url=${var.matchbox_http_endpoint}/assets/fedora-coreos/fedora-coreos-${var.os_version}-live-rootfs.x86_64.img", 22 | "coreos.inst.install_dev=${var.install_disk}", 23 | "coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}", 24 | ] 25 | 26 | kernel = var.cached_install ? local.cached_kernel : local.remote_kernel 27 | initrd = var.cached_install ? local.cached_initrd : local.remote_initrd 28 | args = var.cached_install ? 
local.cached_args : local.remote_args 29 | } 30 | 31 | // Match a worker to a profile by MAC 32 | resource "matchbox_group" "worker" { 33 | name = format("%s-%s", var.cluster_name, var.name) 34 | profile = matchbox_profile.worker.name 35 | selector = { 36 | mac = var.mac 37 | } 38 | } 39 | 40 | // Fedora CoreOS worker profile 41 | resource "matchbox_profile" "worker" { 42 | name = format("%s-worker-%s", var.cluster_name, var.name) 43 | kernel = local.kernel 44 | initrd = local.initrd 45 | args = concat(local.args, var.kernel_args) 46 | 47 | raw_ignition = data.ct_config.worker.rendered 48 | } 49 | 50 | # Fedora CoreOS workers 51 | data "ct_config" "worker" { 52 | content = templatefile("${path.module}/butane/worker.yaml", { 53 | domain_name = var.domain 54 | ssh_authorized_key = var.ssh_authorized_key 55 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 56 | node_labels = join(",", var.node_labels) 57 | node_taints = join(",", var.node_taints) 58 | }) 59 | strict = true 60 | snippets = var.snippets 61 | } 62 | 63 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/worker/ssh.tf: -------------------------------------------------------------------------------- 1 | # Secure copy kubeconfig to worker. Activates kubelet.service 2 | resource "null_resource" "copy-worker-secrets" { 3 | # Without depends_on, remote-exec could start and wait for machines before 4 | # matchbox groups are written, causing a deadlock. 5 | depends_on = [ 6 | matchbox_group.worker, 7 | ] 8 | 9 | connection { 10 | type = "ssh" 11 | host = var.domain 12 | user = "core" 13 | timeout = "60m" 14 | } 15 | 16 | provisioner "file" { 17 | content = var.kubeconfig 18 | destination = "/home/core/kubeconfig" 19 | } 20 | 21 | provisioner "remote-exec" { 22 | inline = [ 23 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 24 | "sudo touch /etc/kubernetes", 25 | ] 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/worker/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | null = ">= 2.1" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | matchbox = { 12 | source = "poseidon/matchbox" 13 | version = "~> 0.5.0" 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | count = length(var.workers) 3 | source = "./worker" 4 | 5 | cluster_name = var.cluster_name 6 | 7 | # metal 8 | matchbox_http_endpoint = var.matchbox_http_endpoint 9 | os_stream = var.os_stream 10 | os_version = var.os_version 11 | 12 | # machine 13 | name = var.workers[count.index].name 14 | mac = var.workers[count.index].mac 15 | domain = var.workers[count.index].domain 16 | 17 | # configuration 18 | kubeconfig = module.bootstrap.kubeconfig-kubelet 19 | ssh_authorized_key = var.ssh_authorized_key 20 | service_cidr = var.service_cidr 21 | node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, []) 22 | node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, []) 23 | snippets = lookup(var.snippets, var.workers[count.index].name, []) 24 | 25 | # optional 26 | 
cached_install = var.cached_install 27 | install_disk = var.install_disk 28 | kernel_args = var.kernel_args 29 | } 30 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/flatcar-linux/bare-metal/). 
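A minimal usage sketch with placeholder values. The `os_channel`/`os_version` pair and the machine fields mirror the variables consumed in `workers.tf` below; the `os_channel` form shown is an assumption from the tutorial (the module's `variables.tf`, not shown here, defines the accepted values), and the release tag follows Typhoon's convention of matching the Kubernetes version.

```tf
module "cluster" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/flatcar-linux/kubernetes?ref=v1.33.1"

  # bare-metal
  cluster_name           = "example"
  matchbox_http_endpoint = "http://matchbox.example.com:8080" # placeholder endpoint
  os_channel             = "flatcar-stable" # assumed form; check variables.tf
  os_version             = "3815.2.0"       # placeholder Flatcar version

  # machines
  k8s_domain_name    = "node1.example.com"
  ssh_authorized_key = "ssh-ed25519 AAAA..." # placeholder public key
  controllers = [{
    name   = "node1"
    mac    = "52:54:00:a1:9c:ae" # placeholder MAC
    domain = "node1.example.com"
  }]
  workers = [{
    name   = "node2"
    mac    = "52:54:00:b2:2f:86" # placeholder MAC
    domain = "node2.example.com"
  }]
}
```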
23 | 24 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [var.k8s_domain_name] 7 | service_account_issuer = var.service_account_issuer 8 | etcd_servers = var.controllers.*.domain 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | components = var.components 13 | } 14 | 15 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/butane/install.yaml: -------------------------------------------------------------------------------- 1 | variant: flatcar 2 | version: 1.0.0 3 | systemd: 4 | units: 5 | - name: installer.service 6 | enabled: true 7 | contents: | 8 | [Unit] 9 | Requires=network-online.target 10 | After=network-online.target 11 | [Service] 12 | Type=simple 13 | ExecStart=/opt/installer 14 | [Install] 15 | WantedBy=multi-user.target 16 | # Avoid using the standard SSH port so terraform apply cannot SSH until 17 | # post-install. But admins may SSH to debug disk install problems. 18 | # After install, sshd will use port 22 and users/terraform can connect. 19 | - name: sshd.socket 20 | dropins: 21 | - name: 10-sshd-port.conf 22 | contents: | 23 | [Socket] 24 | ListenStream= 25 | ListenStream=2222 26 | storage: 27 | files: 28 | - path: /opt/installer 29 | mode: 0500 30 | contents: 31 | inline: | 32 | #!/bin/bash -ex 33 | curl --retry 10 "${ignition_endpoint}?mac=${mac}&os=installed" -o ignition.json 34 | flatcar-install \ 35 | -d ${install_disk} \ 36 | -C ${os_channel} \ 37 | -V ${os_version} \ 38 | ${oem_flag} \ 39 | ${baseurl_flag} \ 40 | -i ignition.json 41 | udevadm settle 42 | systemctl reboot 43 | passwd: 44 | users: 45 | - name: core 46 | ssh_authorized_keys: 47 | - "${ssh_authorized_key}" 48 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for workers 7 | 8 | output "kubeconfig" { 9 | value = module.bootstrap.kubeconfig-kubelet 10 | sensitive = true 11 | } 12 | 13 | # Outputs for debug 14 | 15 | output "assets_dist" { 16 | value = module.bootstrap.assets_dist 17 | sensitive = true 18 | } 19 | 20 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = length(var.controllers) 13 | 14 | # Without depends_on, remote-exec could start and wait for machines before 15 | # matchbox groups are written, causing a deadlock. 
16 | depends_on = [ 17 | matchbox_group.install, 18 | matchbox_group.controller, 19 | module.bootstrap, 20 | ] 21 | 22 | connection { 23 | type = "ssh" 24 | host = var.controllers.*.domain[count.index] 25 | user = "core" 26 | timeout = "60m" 27 | } 28 | 29 | provisioner "file" { 30 | content = module.bootstrap.kubeconfig-kubelet 31 | destination = "/home/core/kubeconfig" 32 | } 33 | 34 | provisioner "file" { 35 | content = join("\n", local.assets_bundle) 36 | destination = "/home/core/assets" 37 | } 38 | 39 | provisioner "remote-exec" { 40 | inline = [ 41 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 42 | "sudo /opt/bootstrap/layout", 43 | ] 44 | } 45 | } 46 | 47 | # Connect to a controller to perform one-time cluster bootstrap. 48 | resource "null_resource" "bootstrap" { 49 | # Without depends_on, this remote-exec may start before the kubeconfig copy. 50 | # Terraform only does one task at a time, so it would try to bootstrap 51 | # while no Kubelets are running. 52 | depends_on = [ 53 | null_resource.copy-controller-secrets, 54 | ] 55 | 56 | connection { 57 | type = "ssh" 58 | host = var.controllers[0].domain 59 | user = "core" 60 | timeout = "15m" 61 | } 62 | 63 | provisioner "remote-exec" { 64 | inline = [ 65 | "sudo systemctl start bootstrap", 66 | ] 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | null = ">= 2.1" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | matchbox = { 12 | source = "poseidon/matchbox" 13 | version = "~> 0.5.0" 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/worker/butane/install.yaml: -------------------------------------------------------------------------------- 1 | variant: flatcar 2 | version: 1.0.0 3 | systemd: 4 | units: 5 | - name: installer.service 6 | enabled: true 7 | contents: | 8 | [Unit] 9 | Requires=network-online.target 10 | After=network-online.target 11 | [Service] 12 | Type=simple 13 | ExecStart=/opt/installer 14 | [Install] 15 | WantedBy=multi-user.target 16 | # Avoid using the standard SSH port so terraform apply cannot SSH until 17 | # post-install. But admins may SSH to debug disk install problems. 18 | # After install, sshd will use port 22 and users/terraform can connect. 19 | - name: sshd.socket 20 | dropins: 21 | - name: 10-sshd-port.conf 22 | contents: | 23 | [Socket] 24 | ListenStream= 25 | ListenStream=2222 26 | storage: 27 | files: 28 | - path: /opt/installer 29 | mode: 0500 30 | contents: 31 | inline: | 32 | #!/bin/bash -ex 33 | curl --retry 10 "${ignition_endpoint}?mac=${mac}&os=installed" -o ignition.json 34 | flatcar-install \ 35 | -d ${install_disk} \ 36 | -C ${os_channel} \ 37 | -V ${os_version} \ 38 | ${oem_flag} \ 39 | ${baseurl_flag} \ 40 | -i ignition.json 41 | udevadm settle 42 | systemctl reboot 43 | passwd: 44 | users: 45 | - name: core 46 | ssh_authorized_keys: 47 | - "${ssh_authorized_key}" 48 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/worker/ssh.tf: -------------------------------------------------------------------------------- 1 | # Secure copy kubeconfig to worker. 
Activates kubelet.service 2 | resource "null_resource" "copy-worker-secrets" { 3 | # Without depends_on, remote-exec could start and wait for machines before 4 | # matchbox groups are written, causing a deadlock. 5 | depends_on = [ 6 | matchbox_group.install, 7 | matchbox_group.worker, 8 | ] 9 | 10 | connection { 11 | type = "ssh" 12 | host = var.domain 13 | user = "core" 14 | timeout = "60m" 15 | } 16 | 17 | provisioner "file" { 18 | content = var.kubeconfig 19 | destination = "/home/core/kubeconfig" 20 | } 21 | 22 | provisioner "remote-exec" { 23 | inline = [ 24 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 25 | ] 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/worker/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | null = ">= 2.1" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | matchbox = { 12 | source = "poseidon/matchbox" 13 | version = "~> 0.5.0" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /bare-metal/flatcar-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | count = length(var.workers) 3 | source = "./worker" 4 | 5 | cluster_name = var.cluster_name 6 | 7 | # metal 8 | matchbox_http_endpoint = var.matchbox_http_endpoint 9 | os_channel = var.os_channel 10 | os_version = var.os_version 11 | 12 | # machine 13 | name = var.workers[count.index].name 14 | mac = var.workers[count.index].mac 15 | domain = var.workers[count.index].domain 16 | 17 | # configuration 18 | kubeconfig = module.bootstrap.kubeconfig-kubelet 19 | ssh_authorized_key = var.ssh_authorized_key 20 | service_cidr = var.service_cidr 21 | node_labels = lookup(var.worker_node_labels, var.workers[count.index].name, []) 22 | node_taints = lookup(var.worker_node_taints, var.workers[count.index].name, []) 23 | snippets = lookup(var.snippets, var.workers[count.index].name, []) 24 | 25 | # optional 26 | download_protocol = var.download_protocol 27 | cached_install = var.cached_install 28 | install_disk = var.install_disk 29 | kernel_args = var.kernel_args 30 | oem_type = var.oem_type 31 | } 32 | 33 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Typhoon Authors 4 | Copyright (c) 2020 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/fedora-coreos/digitalocean/). 
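A minimal usage sketch with placeholder values. `ssh_fingerprints` matches the `var.ssh_fingerprints` consumed by the droplet resources below; the custom-image reference is an assumption, since Fedora CoreOS is typically uploaded as a custom DigitalOcean image per the tutorial.

```tf
module "cluster" {
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/fedora-coreos/kubernetes?ref=v1.33.1"

  # Digital Ocean
  cluster_name = "example"
  region       = "nyc3"
  dns_zone     = "do.example.com"

  # instances
  os_image     = "12345678" # placeholder ID of an uploaded Fedora CoreOS custom image
  worker_count = 2

  # DigitalOcean droplets match SSH keys by fingerprint
  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"] # placeholder
}
```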
23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = digitalocean_record.etcds.*.fqdn 8 | 9 | service_account_issuer = var.service_account_issuer 10 | networking = var.networking 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | components = var.components 14 | } 15 | 16 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/controllers.tf: -------------------------------------------------------------------------------- 1 | # Controller Instance DNS records 2 | resource "digitalocean_record" "controllers" { 3 | count = var.controller_count 4 | 5 | # DNS zone where record should be created 6 | domain = var.dns_zone 7 | 8 | # DNS record (will be prepended to domain) 9 | name = var.cluster_name 10 | type = "A" 11 | ttl = 300 12 | 13 | # IPv4 addresses of controllers 14 | value = digitalocean_droplet.controllers.*.ipv4_address[count.index] 15 | } 16 | 17 | # Discrete DNS records for each controller's private IPv4 for etcd usage 18 | resource "digitalocean_record" "etcds" { 19 | count = var.controller_count 20 | 21 | # DNS zone where record should be created 22 | domain = var.dns_zone 23 | 24 | # DNS record (will be prepended to domain) 25 | name = "${var.cluster_name}-etcd${count.index}" 26 | type = "A" 27 | ttl = 300 28 | 29 | # private IPv4 address for etcd 30 | value = digitalocean_droplet.controllers.*.ipv4_address_private[count.index] 31 | } 32 | 33 | # Controller droplet instances 34 | resource "digitalocean_droplet" "controllers" { 35 | count = var.controller_count 36 | 37 | name = "${var.cluster_name}-controller-${count.index}" 38 | region = var.region 39 | 40 | image = var.os_image 41 | size = var.controller_type 42 | 43 | # network 44 | vpc_uuid = digitalocean_vpc.network.id 45 | # TODO: Only official DigitalOcean images support IPv6 46 | ipv6 = false 47 | 48 | user_data = data.ct_config.controllers.*.rendered[count.index] 49 | ssh_keys = var.ssh_fingerprints 50 | 51 | tags = [ 52 | digitalocean_tag.controllers.id, 53 | ] 54 | 55 | lifecycle { 56 | ignore_changes = [user_data] 57 | } 58 | } 59 | 60 | # Tag to label controllers 61 | resource "digitalocean_tag" "controllers" { 62 | name = "${var.cluster_name}-controller" 63 | } 64 | 65 | # Fedora CoreOS controllers 66 | data "ct_config" "controllers" { 67 | count = var.controller_count 68 | content = templatefile("${path.module}/butane/controller.yaml", { 69 | # Cannot use cyclic dependencies on controllers or their DNS records 70 | etcd_name = "etcd${count.index}" 71 | etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" 72 | # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,... 
73 | etcd_initial_cluster = join(",", [ 74 | for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380" 75 | ]) 76 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 77 | }) 78 | strict = true 79 | snippets = var.controller_snippets 80 | } 81 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "controllers_dns" { 9 | value = digitalocean_record.controllers[0].fqdn 10 | } 11 | 12 | output "workers_dns" { 13 | # Multiple A and AAAA records with the same FQDN 14 | value = digitalocean_record.workers-record-a[0].fqdn 15 | } 16 | 17 | output "controllers_ipv4" { 18 | value = digitalocean_droplet.controllers.*.ipv4_address 19 | } 20 | 21 | output "controllers_ipv6" { 22 | value = digitalocean_droplet.controllers.*.ipv6_address 23 | } 24 | 25 | output "workers_ipv4" { 26 | value = digitalocean_droplet.workers.*.ipv4_address 27 | } 28 | 29 | output "workers_ipv6" { 30 | value = digitalocean_droplet.workers.*.ipv6_address 31 | } 32 | 33 | # Outputs for worker pools 34 | 35 | output "kubeconfig" { 36 | value = module.bootstrap.kubeconfig-kubelet 37 | sensitive = true 38 | } 39 | 40 | # Outputs for custom firewalls 41 | 42 | output "controller_tag" { 43 | description = "Tag applied to controller droplets" 44 | value = digitalocean_tag.controllers.name 45 | } 46 | 47 | output "worker_tag" { 48 | description = "Tag applied to worker droplets" 49 | value = digitalocean_tag.workers.name 50 | } 51 | 52 | # Outputs for custom load balancing 53 | 54 | output "vpc_id" { 55 | description = "ID of the cluster VPC" 56 | value = digitalocean_vpc.network.id 57 | } 58 | 59 | # Outputs for debug 60 | 61 | output "assets_dist" { 62 | value = module.bootstrap.assets_dist 63 | sensitive = true 64 | } 65 | 66 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | digitalocean_firewall.rules 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = digitalocean_droplet.controllers.*.ipv4_address[count.index] 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = module.bootstrap.kubeconfig-kubelet 28 | destination = "/home/core/kubeconfig" 29 | } 30 | 31 | provisioner "file" { 32 | content = join("\n", local.assets_bundle) 33 | destination = "/home/core/assets" 34 | } 35 | 36 | provisioner "remote-exec" { 37 | inline = [ 38 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 39 | "sudo touch /etc/kubernetes", 40 | "sudo /opt/bootstrap/layout", 41 | ] 42 | } 43 | } 44 | 45 | # Secure copy kubeconfig to all workers. Activates kubelet.service. 
46 | resource "null_resource" "copy-worker-secrets" { 47 | count = var.worker_count 48 | 49 | connection { 50 | type = "ssh" 51 | host = digitalocean_droplet.workers.*.ipv4_address[count.index] 52 | user = "core" 53 | timeout = "15m" 54 | } 55 | 56 | provisioner "file" { 57 | content = module.bootstrap.kubeconfig-kubelet 58 | destination = "/home/core/kubeconfig" 59 | } 60 | 61 | provisioner "remote-exec" { 62 | inline = [ 63 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 64 | "sudo touch /etc/kubernetes", 65 | ] 66 | } 67 | } 68 | 69 | # Connect to a controller to perform one-time cluster bootstrap. 70 | resource "null_resource" "bootstrap" { 71 | depends_on = [ 72 | null_resource.copy-controller-secrets, 73 | null_resource.copy-worker-secrets, 74 | ] 75 | 76 | connection { 77 | type = "ssh" 78 | host = digitalocean_droplet.controllers[0].ipv4_address 79 | user = "core" 80 | timeout = "15m" 81 | } 82 | 83 | provisioner "remote-exec" { 84 | inline = [ 85 | "sudo systemctl start bootstrap", 86 | ] 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | null = ">= 2.1" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | digitalocean = { 12 | source = "digitalocean/digitalocean" 13 | version = ">= 2.12, < 3.0" 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | # Worker DNS records 2 | resource "digitalocean_record" "workers-record-a" { 3 | count = var.worker_count 4 | 5 | # DNS zone where record should be created 6 | domain = var.dns_zone 7 | 8 | name = "${var.cluster_name}-workers" 9 | type = "A" 10 | ttl = 300 11 | value = digitalocean_droplet.workers.*.ipv4_address[count.index] 12 | } 13 | 14 | /* 15 | # TODO: Only official DigitalOcean images support IPv6 16 | resource "digitalocean_record" "workers-record-aaaa" { 17 | count = var.worker_count 18 | 19 | # DNS zone where record should be created 20 | domain = var.dns_zone 21 | 22 | name = "${var.cluster_name}-workers" 23 | type = "AAAA" 24 | ttl = 300 25 | value = digitalocean_droplet.workers.*.ipv6_address[count.index] 26 | } 27 | */ 28 | 29 | # Worker droplet instances 30 | resource "digitalocean_droplet" "workers" { 31 | count = var.worker_count 32 | 33 | name = "${var.cluster_name}-worker-${count.index}" 34 | region = var.region 35 | 36 | image = var.os_image 37 | size = var.worker_type 38 | 39 | # network 40 | vpc_uuid = digitalocean_vpc.network.id 41 | # TODO: Only official DigitalOcean images support IPv6 42 | ipv6 = false 43 | 44 | user_data = data.ct_config.worker.rendered 45 | ssh_keys = var.ssh_fingerprints 46 | 47 | tags = [ 48 | digitalocean_tag.workers.id, 49 | ] 50 | 51 | lifecycle { 52 | create_before_destroy = true 53 | } 54 | } 55 | 56 | # Tag to label workers 57 | resource "digitalocean_tag" "workers" { 58 | name = "${var.cluster_name}-worker" 59 | } 60 | 61 | # Fedora CoreOS worker 62 | data "ct_config" "worker" { 63 | content = templatefile("${path.module}/butane/worker.yaml", { 64 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 65 | }) 66 | strict = true 67 | snippets 
= var.worker_snippets 68 | } 69 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/flatcar-linux/digitalocean/). 
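For orientation, instantiating this module might look like the sketch below. All values (cluster name, region, DNS zone, image, SSH fingerprint) are illustrative placeholders, and the authoritative input list is defined in this module's `variables.tf`; see the tutorial above for full steps.

```tf
# Hypothetical usage sketch - every value below is a placeholder, not a default.
module "nemo" {
  source = "git::https://github.com/poseidon/typhoon//digital-ocean/flatcar-linux/kubernetes?ref=<release>"

  # Digital Ocean
  cluster_name = "nemo"
  region       = "nyc3"
  dns_zone     = "do.example.com"

  # instances
  # ID of a Flatcar Linux image previously uploaded to DigitalOcean (placeholder)
  os_image     = "custom-image-id"
  worker_count = 2

  # configuration (fingerprints of SSH keys already registered with DigitalOcean)
  ssh_fingerprints = ["d7:9d:79:ae:56:32:73:79:95:88:e3:a2:ab:5d:45:e7"]
}
```

After `terraform apply`, the `kubeconfig-admin` output (see `outputs.tf`) can be written to a local file to access the cluster.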
23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = digitalocean_record.etcds.*.fqdn 8 | 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | components = var.components 13 | } 14 | 15 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/controllers.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | official_images = [] 3 | is_official_image = contains(local.official_images, var.os_image) 4 | } 5 | 6 | # Controller Instance DNS records 7 | resource "digitalocean_record" "controllers" { 8 | count = var.controller_count 9 | 10 | # DNS zone where record should be created 11 | domain = var.dns_zone 12 | 13 | # DNS record (will be prepended to domain) 14 | name = var.cluster_name 15 | type = "A" 16 | ttl = 300 17 | 18 | # IPv4 addresses of controllers 19 | value = digitalocean_droplet.controllers.*.ipv4_address[count.index] 20 | } 21 | 22 | # Discrete DNS records for each controller's private IPv4 for etcd usage 23 | resource "digitalocean_record" "etcds" { 24 | count = var.controller_count 25 | 26 | # DNS zone where record should be created 27 | domain = var.dns_zone 28 | 29 | # DNS record (will be prepended to domain) 30 | name = "${var.cluster_name}-etcd${count.index}" 31 | type = "A" 32 | ttl = 300 33 | 34 | # private IPv4 address for etcd 35 | value = digitalocean_droplet.controllers.*.ipv4_address_private[count.index] 36 | } 37 | 38 | # Controller droplet instances 39 | resource "digitalocean_droplet" "controllers" { 40 | count = var.controller_count 41 | 42 | name = "${var.cluster_name}-controller-${count.index}" 43 | region = var.region 44 | 45 | image = var.os_image 46 | size = var.controller_type 47 | 48 | # network 49 | vpc_uuid = digitalocean_vpc.network.id 50 | # TODO: Only official DigitalOcean images support IPv6 51 | ipv6 = false 52 | 53 | user_data = data.ct_config.controllers.*.rendered[count.index] 54 | ssh_keys = var.ssh_fingerprints 55 | 56 | tags = [ 57 | digitalocean_tag.controllers.id, 58 | ] 59 | 60 | lifecycle { 61 | ignore_changes = [user_data] 62 | } 63 | } 64 | 65 | # Tag to label controllers 66 | resource "digitalocean_tag" "controllers" { 67 | name = "${var.cluster_name}-controller" 68 | } 69 | 70 | # Flatcar Linux controllers 71 | data "ct_config" "controllers" { 72 | count = var.controller_count 73 | content = templatefile("${path.module}/butane/controller.yaml", { 74 | # Cannot use cyclic dependencies on controllers or their DNS records 75 | etcd_name = "etcd${count.index}" 76 | etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}" 77 | # etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,... 
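# For example, with cluster_name = "nemo", dns_zone = "example.com", and
# controller_count = 3, the join/range expression below renders:
#   etcd0=https://nemo-etcd0.example.com:2380,etcd1=https://nemo-etcd1.example.com:2380,etcd2=https://nemo-etcd2.example.com:2380
# (note the :2380 etcd peer port appended to each URL)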
78 | etcd_initial_cluster = join(",", [ 79 | for i in range(var.controller_count) : "etcd${i}=https://${var.cluster_name}-etcd${i}.${var.dns_zone}:2380" 80 | ]) 81 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 82 | }) 83 | strict = true 84 | snippets = var.controller_snippets 85 | } 86 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "controllers_dns" { 9 | value = digitalocean_record.controllers[0].fqdn 10 | } 11 | 12 | output "workers_dns" { 13 | # Multiple A and AAAA records with the same FQDN 14 | value = digitalocean_record.workers-record-a[0].fqdn 15 | } 16 | 17 | output "controllers_ipv4" { 18 | value = digitalocean_droplet.controllers.*.ipv4_address 19 | } 20 | 21 | output "controllers_ipv6" { 22 | value = digitalocean_droplet.controllers.*.ipv6_address 23 | } 24 | 25 | output "workers_ipv4" { 26 | value = digitalocean_droplet.workers.*.ipv4_address 27 | } 28 | 29 | output "workers_ipv6" { 30 | value = digitalocean_droplet.workers.*.ipv6_address 31 | } 32 | 33 | # Outputs for worker pools 34 | 35 | output "kubeconfig" { 36 | value = module.bootstrap.kubeconfig-kubelet 37 | sensitive = true 38 | } 39 | 40 | # Outputs for custom firewalls 41 | 42 | output "controller_tag" { 43 | description = "Tag applied to controller droplets" 44 | value = digitalocean_tag.controllers.name 45 | } 46 | 47 | output "worker_tag" { 48 | description = "Tag applied to worker droplets" 49 | value = digitalocean_tag.workers.name 50 | } 51 | 52 | # Outputs for custom load balancing 53 | 54 | output "vpc_id" { 55 | description = "ID of the cluster VPC" 56 | value = digitalocean_vpc.network.id 57 | } 58 | 59 | # Outputs for debug 60 | 61 | output "assets_dist" { 62 | value = module.bootstrap.assets_dist 63 | sensitive = true 64 | } 65 | 66 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | digitalocean_firewall.rules 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = digitalocean_droplet.controllers.*.ipv4_address[count.index] 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = module.bootstrap.kubeconfig-kubelet 28 | destination = "/home/core/kubeconfig" 29 | } 30 | 31 | provisioner "file" { 32 | content = join("\n", local.assets_bundle) 33 | destination = "/home/core/assets" 34 | } 35 | 36 | provisioner "remote-exec" { 37 | inline = [ 38 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 39 | "sudo /opt/bootstrap/layout", 40 | ] 41 | } 42 | } 43 | 44 | # Secure copy kubeconfig to all workers. Activates kubelet.service. 
45 | resource "null_resource" "copy-worker-secrets" { 46 | count = var.worker_count 47 | 48 | connection { 49 | type = "ssh" 50 | host = digitalocean_droplet.workers.*.ipv4_address[count.index] 51 | user = "core" 52 | timeout = "15m" 53 | } 54 | 55 | provisioner "file" { 56 | content = module.bootstrap.kubeconfig-kubelet 57 | destination = "/home/core/kubeconfig" 58 | } 59 | 60 | provisioner "remote-exec" { 61 | inline = [ 62 | "sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig", 63 | ] 64 | } 65 | } 66 | 67 | # Connect to a controller to perform one-time cluster bootstrap. 68 | resource "null_resource" "bootstrap" { 69 | depends_on = [ 70 | null_resource.copy-controller-secrets, 71 | null_resource.copy-worker-secrets, 72 | ] 73 | 74 | connection { 75 | type = "ssh" 76 | host = digitalocean_droplet.controllers[0].ipv4_address 77 | user = "core" 78 | timeout = "15m" 79 | } 80 | 81 | provisioner "remote-exec" { 82 | inline = [ 83 | "sudo systemctl start bootstrap", 84 | ] 85 | } 86 | } 87 | 88 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | null = ">= 2.1" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | digitalocean = { 12 | source = "digitalocean/digitalocean" 13 | version = ">= 2.12, < 3.0" 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /digital-ocean/flatcar-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | # Worker DNS records 2 | resource "digitalocean_record" "workers-record-a" { 3 | count = var.worker_count 4 | 5 | # DNS zone where record should be created 6 | domain = var.dns_zone 7 | 8 | name = "${var.cluster_name}-workers" 9 | type = "A" 10 | ttl = 300 11 | value = digitalocean_droplet.workers.*.ipv4_address[count.index] 12 | } 13 | 14 | resource "digitalocean_record" "workers-record-aaaa" { 15 | # only official DigitalOcean images support IPv6 16 | count = local.is_official_image ? 
var.worker_count : 0 17 | 18 | # DNS zone where record should be created 19 | domain = var.dns_zone 20 | 21 | name = "${var.cluster_name}-workers" 22 | type = "AAAA" 23 | ttl = 300 24 | value = digitalocean_droplet.workers.*.ipv6_address[count.index] 25 | } 26 | 27 | # Worker droplet instances 28 | resource "digitalocean_droplet" "workers" { 29 | count = var.worker_count 30 | 31 | name = "${var.cluster_name}-worker-${count.index}" 32 | region = var.region 33 | 34 | image = var.os_image 35 | size = var.worker_type 36 | 37 | # network 38 | vpc_uuid = digitalocean_vpc.network.id 39 | # only official DigitalOcean images support IPv6 40 | ipv6 = local.is_official_image 41 | 42 | user_data = data.ct_config.worker.rendered 43 | ssh_keys = var.ssh_fingerprints 44 | 45 | tags = [ 46 | digitalocean_tag.workers.id, 47 | ] 48 | 49 | lifecycle { 50 | create_before_destroy = true 51 | } 52 | } 53 | 54 | # Tag to label workers 55 | resource "digitalocean_tag" "workers" { 56 | name = "${var.cluster_name}-worker" 57 | } 58 | 59 | # Flatcar Linux worker 60 | data "ct_config" "worker" { 61 | content = templatefile("${path.module}/butane/worker.yaml", { 62 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 63 | }) 64 | strict = true 65 | snippets = var.worker_snippets 66 | } 67 | -------------------------------------------------------------------------------- /docs/CNAME: -------------------------------------------------------------------------------- 1 | typhoon.psdn.io -------------------------------------------------------------------------------- /docs/addons/fleetlock.md: -------------------------------------------------------------------------------- 1 | ## fleetlock 2 | 3 | [fleetlock](https://github.com/poseidon/fleetlock) is a reboot coordinator for Fedora CoreOS nodes. It implements the [FleetLock](https://github.com/coreos/airlock/pull/1/files) protocol for use as a [Zincati](https://github.com/coreos/zincati) lock [strategy](https://github.com/coreos/zincati/blob/master/docs/usage/updates-strategy.md) backend. 4 | 5 | Declare a Zincati `fleet_lock` strategy when provisioning Fedora CoreOS nodes via [snippets](/advanced/customization/#hosts). 6 | 7 | ```yaml 8 | variant: fcos 9 | version: 1.5.0 10 | storage: 11 | files: 12 | - path: /etc/zincati/config.d/55-update-strategy.toml 13 | contents: 14 | inline: | 15 | [updates] 16 | strategy = "fleet_lock" 17 | [updates.fleet_lock] 18 | base_url = "http://10.3.0.15/" 19 | ``` 20 | 21 | ```tf 22 | module "nemo" { 23 | ... 24 | controller_snippets = [ 25 | file("./snippets/zincati-strategy.yaml"), 26 | ] 27 | worker_snippets = [ 28 | file("./snippets/zincati-strategy.yaml"), 29 | ] 30 | } 31 | ``` 32 | 33 | Apply fleetlock based on the example manifests. 34 | 35 | ```sh 36 | git clone git@github.com:poseidon/fleetlock.git 37 | kubectl apply -f examples/k8s 38 | ``` 39 | 40 | -------------------------------------------------------------------------------- /docs/addons/grafana.md: -------------------------------------------------------------------------------- 1 | ## Grafana 2 | 3 | Grafana can be used to build dashboards and visualizations that use Prometheus as the datasource. Create the grafana deployment and service. 4 | 5 | ``` 6 | kubectl apply -f addons/grafana -R 7 | ``` 8 | 9 | Use `kubectl` to authenticate to the apiserver and create a local port-forward to the Grafana pod. 10 | 11 | ``` 12 | kubectl port-forward grafana-POD-ID 8080 -n monitoring 13 | ``` 14 | 15 | Visit [127.0.0.1:8080](http://127.0.0.1:8080) to view the bundled dashboards. 
16 | 17 | ![Grafana etcd](../img/grafana-etcd.png) 18 | ![Grafana resources cluster](../img/grafana-resources-cluster.png) 19 | ![Grafana usage cluster](../img/grafana-usage-cluster.png) 20 | ![Grafana usage node](../img/grafana-usage-node.png) 21 | 22 | -------------------------------------------------------------------------------- /docs/addons/prometheus.md: -------------------------------------------------------------------------------- 1 | # Prometheus 2 | 3 | Prometheus collects metrics (e.g. `node_memory_usage_bytes`) from *targets* by scraping their HTTP metrics endpoints. Targets are organized into *jobs*, defined in the Prometheus config. Targets may expose counter, gauge, histogram, or summary metrics. 4 | 5 | Here's a simple config from the Prometheus [tutorial](https://prometheus.io/docs/introduction/getting_started/). 6 | 7 | ``` 8 | global: 9 | scrape_interval: 15s 10 | scrape_configs: 11 | - job_name: 'prometheus' 12 | scrape_interval: 5s 13 | static_configs: 14 | - targets: ['localhost:9090'] 15 | ``` 16 | 17 | On Kubernetes clusters, Prometheus is run as a Deployment, configured with a ConfigMap, and accessed via a Service or Ingress. 18 | 19 | ``` 20 | kubectl apply -f addons/prometheus -R 21 | ``` 22 | 23 | The ConfigMap configures Prometheus to discover apiservers, kubelets, cAdvisor, services, endpoints, and exporters. By default, data is kept in an `emptyDir` so it is persisted until the pod is rescheduled. 24 | 25 | ### Exporters 26 | 27 | Exporters expose metrics for 3rd-party systems that don't natively expose Prometheus metrics. 28 | 29 | * [node_exporter](https://github.com/prometheus/node_exporter) - DaemonSet that exposes a machine's hardware and OS metrics 30 | * [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) - Deployment that exposes Kubernetes object metrics 31 | * [blackbox_exporter](https://github.com/prometheus/blackbox_exporter) - Scrapes HTTP, HTTPS, DNS, TCP, or ICMP endpoints and exposes availability as metrics 32 | 33 | ### Queries and Alerts 34 | 35 | Prometheus provides a basic UI for querying metrics and viewing alerts. Use `kubectl` to authenticate to the apiserver and create a local port-forward to the Prometheus pod. 36 | 37 | ``` 38 | kubectl get pods -n monitoring 39 | kubectl port-forward prometheus-POD-ID 9090 -n monitoring 40 | ``` 41 | 42 | Visit [127.0.0.1:9090](http://127.0.0.1:9090) to query [expressions](http://127.0.0.1:9090/graph), view [targets](http://127.0.0.1:9090/targets), or check [alerts](http://127.0.0.1:9090/alerts). 43 | 44 | ![Prometheus Graph](../img/prometheus-graph.png) 45 |
46 | ![Prometheus Targets](../img/prometheus-targets.png) 47 |
48 | ![Prometheus Alerts](../img/prometheus-alerts.png) 49 | 50 | Use [Grafana](/addons/grafana/) to view or build dashboards that use Prometheus as the datasource. 51 | -------------------------------------------------------------------------------- /docs/advanced/overview.md: -------------------------------------------------------------------------------- 1 | # Advanced 2 | 3 | Typhoon clusters offer several advanced features for skilled users. 4 | 5 | * [ARM64](arm64.md) 6 | * [Customization](customization.md) 7 | * [Nodes](nodes.md) 8 | * [Worker Pools](worker-pools.md) 9 | -------------------------------------------------------------------------------- /docs/architecture/bare-metal.md: -------------------------------------------------------------------------------- 1 | # Bare-Metal 2 | 3 | ## Load Balancing 4 | 5 | ### kube-apiserver 6 | 7 | Load balancing across controller nodes with a healthy `kube-apiserver` is determined by your unique bare-metal environment and its capabilities. 8 | 9 | ### HTTP/HTTPS Ingress 10 | 11 | Load balancing across worker nodes with a healthy Ingress Controller is determined by your unique bare-metal environment and its capabilities. 12 | 13 | See the `nginx-ingress` addon to run [Nginx as the Ingress Controller](/addons/ingress/#bare-metal) for bare-metal. 14 | 15 | ### TCP/UDP Services 16 | 17 | Load balancing across worker nodes with TCP/UDP services is determined by your unique bare-metal environment and its capabilities. 18 | 19 | ## IPv6 20 | 21 | Status of IPv6 on Typhoon bare-metal clusters. 22 | 23 | | IPv6 Feature | Supported | 24 | |-------------------------|-----------| 25 | | Node IPv6 address | Yes | 26 | | Node Outbound IPv6 | Yes | 27 | | Kubernetes Ingress IPv6 | Possible | 28 | 29 | IPv6 support depends upon the bare-metal network environment. 30 | -------------------------------------------------------------------------------- /docs/architecture/operating-systems.md: -------------------------------------------------------------------------------- 1 | # Operating Systems 2 | 3 | Typhoon supports [Fedora CoreOS](https://getfedora.org/coreos/) and [Flatcar Linux](https://www.flatcar-linux.org/). These operating systems were chosen because they offer: 4 | 5 | * Minimalism and focus on clustered operation 6 | * Automated and atomic operating system upgrades 7 | * Declarative and immutable configuration 8 | * Optimization for containerized applications 9 | 10 | Together, they diversify Typhoon to support a range of container technologies. 
11 | 12 | * Fedora CoreOS: rpm-ostree, podman, containerd 13 | * Flatcar Linux: Gentoo core, docker, containerd 14 | 15 | ## Host Properties 16 | 17 | | Property | Flatcar Linux | Fedora CoreOS | 18 | |-------------------|---------------|---------------| 19 | | Kernel | ~5.15.x | ~6.5.x | 20 | | systemd | 252 | 254 | 21 | | Username | core | core | 22 | | Ignition system | Ignition v3.x spec | Ignition v3.x spec | 23 | | storage driver | overlay2 (extfs) | overlay2 (xfs) | 24 | | logging driver | json-file | journald | 25 | | cgroup driver | systemd | systemd | 26 | | cgroup version | v2 | v2 | 27 | | Networking | systemd-networkd | NetworkManager | 28 | | Resolver | systemd-resolved | systemd-resolved | 29 | 30 | ## Kubernetes Properties 31 | 32 | | Property | Flatcar Linux | Fedora CoreOS | 33 | |-------------------|---------------|---------------| 34 | | single-master | all platforms | all platforms | 35 | | multi-master | all platforms | all platforms | 36 | | control plane | static pods | static pods | 37 | | Container Runtime | containerd 1.5.9 | containerd 1.6.0 | 38 | | kubelet image | kubelet [image](https://github.com/poseidon/kubelet) with upstream binary | kubelet [image](https://github.com/poseidon/kubelet) with upstream binary | 39 | | control plane images | upstream images | upstream images | 40 | | on-host etcd | docker | podman | 41 | | on-host kubelet | docker | podman | 42 | | CNI plugins | cilium, flannel | cilium, flannel | 43 | | coordinated drain & OS update | [FLUO](https://github.com/kinvolk/flatcar-linux-update-operator) addon | [fleetlock](https://github.com/poseidon/fleetlock) | 44 | 45 | ## Directory Locations 46 | 47 | Typhoon conventional directories. 48 | 49 | | Kubelet setting | Host location | 50 | |-------------------|--------------------------------| 51 | | cni-conf-dir | /etc/cni/net.d | 52 | | pod-manifest-path | /etc/kubernetes/manifests | 53 | | volume-plugin-dir | /var/lib/kubelet/volumeplugins | 54 | 55 | -------------------------------------------------------------------------------- /docs/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/favicon.ico -------------------------------------------------------------------------------- /docs/img/grafana-etcd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/grafana-etcd.png -------------------------------------------------------------------------------- /docs/img/grafana-resources-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/grafana-resources-cluster.png -------------------------------------------------------------------------------- /docs/img/grafana-usage-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/grafana-usage-cluster.png -------------------------------------------------------------------------------- /docs/img/grafana-usage-node.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/grafana-usage-node.png -------------------------------------------------------------------------------- /docs/img/prometheus-alerts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/prometheus-alerts.png -------------------------------------------------------------------------------- /docs/img/prometheus-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/prometheus-graph.png -------------------------------------------------------------------------------- /docs/img/prometheus-targets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/prometheus-targets.png -------------------------------------------------------------------------------- /docs/img/spin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/spin.png -------------------------------------------------------------------------------- /docs/img/typhoon-aws-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/typhoon-aws-load-balancing.png -------------------------------------------------------------------------------- /docs/img/typhoon-azure-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/typhoon-azure-load-balancing.png -------------------------------------------------------------------------------- /docs/img/typhoon-digitalocean-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/typhoon-digitalocean-load-balancing.png -------------------------------------------------------------------------------- /docs/img/typhoon-gcp-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/typhoon-gcp-load-balancing.png -------------------------------------------------------------------------------- /docs/img/typhoon-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/typhoon-logo.png -------------------------------------------------------------------------------- /docs/img/typhoon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidon/typhoon/329d9e42c98306321a3617096a903013f4d63cd2/docs/img/typhoon.png -------------------------------------------------------------------------------- /docs/topics/faq.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | ## 
Terraform 4 | 5 | Typhoon provides a Terraform Module for each supported operating system and platform. Terraform is considered a *format* detail, much like a Linux distro might provide images in the qcow2 or ISO format. It is a mechanism for sharing Typhoon in a way that works for many users. 6 | 7 | Formats rise and evolve. Typhoon may choose to adapt the format over time (with lots of forewarning). However, the authors have built several Kubernetes "distros" before and learned from mistakes - Terraform modules are the right format for now. 8 | 9 | ## Security Issues 10 | 11 | If you find security issues, please see [security disclosures](/topics/security/#disclosures). 12 | 13 | ## Maintainers 14 | 15 | Typhoon clusters are Kubernetes clusters that the maintainers use in real-world, production settings. 16 | 17 | * Maintainers must personally operate a bare-metal and cloud provider cluster and strive to exercise it in real-world scenarios 18 | 19 | We merge features that are along the "blessed path". We minimize options to reduce complexity and matrix size. We remove outdated materials to reduce sprawl. "Skate where the puck is going", but also "wait until the fit is right". No is temporary, yes is forever. 20 | -------------------------------------------------------------------------------- /docs/topics/performance.md: -------------------------------------------------------------------------------- 1 | # Performance 2 | 3 | ## Provision Time 4 | 5 | Provisioning times vary based on the operating system and platform. Sampling the time to create (apply) and destroy clusters with 1 controller and 2 workers shows (roughly) what to expect. 6 | 7 | | Platform | Apply | Destroy | 8 | |---------------|-------|---------| 9 | | AWS | 5 min | 3 min | 10 | | Azure | 10 min | 7 min | 11 | | Bare-Metal | 10-15 min | NA | 12 | | Digital Ocean | 3 min 30 sec | 20 sec | 13 | | Google Cloud | 8 min | 5 min | 14 | 15 | Notes: 16 | 17 | * SOA TTL and NXDOMAIN caching can have a large impact on provision time 18 | * Platforms with auto-scaling take more time to provision (AWS, Azure, Google) 19 | * Bare-metal POST times and network bandwidth will affect provision times 20 | 21 | ## Network Performance 22 | 23 | Network performance varies based on the platform and CNI plugin. `iperf` was used to measure the bandwidth between different hosts and different pods. Host-to-host shows typical bandwidth between host machines. Pod-to-pod shows the bandwidth between two `iperf` containers. 24 | 25 | | Platform / Plugin | Theory | Host to Host | Pod to Pod | 26 | |----------------------------|-------:|-------------:|-------------:| 27 | | AWS (flannel) | 5 Gb/s | 4.94 Gb/s | 4.89 Gb/s | 28 | | AWS (calico, MTU 1480) | 5 Gb/s | 4.94 Gb/s | 4.42 Gb/s | 29 | | AWS (calico, MTU 8981) | 5 Gb/s | 4.94 Gb/s | 4.90 Gb/s | 30 | | Azure (flannel) | Varies | 749 Mb/s | 650 Mb/s | 31 | | Azure (calico) | Varies | 749 Mb/s | 650 Mb/s | 32 | | Bare-Metal (flannel) | 1 Gb/s | 940 Mb/s | 903 Mb/s | 33 | | Bare-Metal (calico) | 1 Gb/s | 940 Mb/s | 931 Mb/s | 34 | | Digital Ocean (flannel) | Varies | 1.97 Gb/s | 1.20 Gb/s | 35 | | Digital Ocean (calico) | Varies | 1.97 Gb/s | 1.20 Gb/s | 36 | | Google Cloud (flannel) | 2 Gb/s | 1.94 Gb/s | 1.76 Gb/s | 37 | | Google Cloud (calico) | 2 Gb/s | 1.94 Gb/s | 1.81 Gb/s | 38 | 39 | Notes: 40 | 41 | * Cilium and Flannel have comparable performance. Platform and configuration differences dominate.
42 | * Azure and DigitalOcean network performance can be quite variable or depend on machine type 43 | * Only [certain AWS EC2 instance types](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) allow jumbo frames. This is why the default MTU on AWS must be 1480. 44 | 45 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Typhoon Authors 4 | Copyright (c) 2020 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/fedora-coreos/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/fedora-coreos/google-cloud/). 
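As on the other platforms, a minimal sketch of instantiating this module follows. All values are illustrative placeholders (the authoritative inputs are defined in this module's `variables.tf`); the tutorial above covers Google Cloud credentials and DNS setup.

```tf
# Hypothetical usage sketch - every value below is a placeholder, not a default.
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=<release>"

  # Google Cloud
  cluster_name = "yavin"
  region       = "us-central1"
  dns_zone     = "example.com"

  # instances
  os_stream    = "stable"
  worker_count = 2

  # configuration
  ssh_authorized_key = "ssh-ed25519 AAAA..."
}
```

The `kubeconfig-admin` and `ingress_static_ipv4`/`ingress_static_ipv6` outputs in `outputs.tf` can then be used to access the cluster and point Ingress DNS records at the load balancer.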
23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")] 7 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 8 | service_account_issuer = var.service_account_issuer 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | daemonset_tolerations = var.daemonset_tolerations 13 | components = var.components 14 | 15 | // temporary 16 | external_apiserver_port = 443 17 | } 18 | 19 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/image.tf: -------------------------------------------------------------------------------- 1 | 2 | # Fedora CoreOS most recent image from stream 3 | data "google_compute_image" "fedora-coreos" { 4 | project = "fedora-coreos-cloud" 5 | family = "fedora-coreos-${var.os_stream}" 6 | } 7 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "ingress_static_ipv4" { 9 | description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller" 10 | value = google_compute_global_address.ingress-ipv4.address 11 | } 12 | 13 | output "ingress_static_ipv6" { 14 | description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller" 15 | value = google_compute_global_address.ingress-ipv6.address 16 | } 17 | 18 | # Outputs for worker pools 19 | 20 | output "network_name" { 21 | value = google_compute_network.network.name 22 | } 23 | 24 | output "kubeconfig" { 25 | value = module.bootstrap.kubeconfig-kubelet 26 | sensitive = true 27 | } 28 | 29 | # Outputs for custom firewalling 30 | 31 | output "network_self_link" { 32 | value = google_compute_network.network.self_link 33 | } 34 | 35 | # Outputs for custom load balancing 36 | 37 | output "worker_instance_group" { 38 | description = "Worker managed instance group full URL" 39 | value = module.workers.instance_group 40 | } 41 | 42 | output "worker_target_pool" { 43 | description = "Worker target pool self link" 44 | value = module.workers.target_pool 45 | } 46 | 47 | # Outputs for debug 48 | 49 | output "assets_dist" { 50 | value = module.bootstrap.assets_dist 51 | sensitive = true 52 | } 53 | 54 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 
11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = local.controllers_ipv4_public[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "/home/core/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | google_dns_record_set.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = local.controllers_ipv4_public[0] 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | google = ">= 2.19" 7 | null = ">= 2.1" 8 | ct = { 9 | source = "poseidon/ct" 10 | version = "~> 0.13" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | cluster_name = var.cluster_name 5 | 6 | # GCE 7 | region = var.region 8 | network = google_compute_network.network.name 9 | worker_count = var.worker_count 10 | machine_type = var.worker_type 11 | os_stream = var.os_stream 12 | disk_size = var.worker_disk_size 13 | disk_type = var.worker_disk_type 14 | preemptible = var.worker_preemptible 15 | 16 | # configuration 17 | kubeconfig = module.bootstrap.kubeconfig-kubelet 18 | ssh_authorized_key = var.ssh_authorized_key 19 | service_cidr = var.service_cidr 20 | snippets = var.worker_snippets 21 | node_labels = var.worker_node_labels 22 | } 23 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/image.tf: -------------------------------------------------------------------------------- 1 | 2 | # Fedora CoreOS most recent image from stream 3 | data "google_compute_image" "fedora-coreos" { 4 | project = "fedora-coreos-cloud" 5 | family = "fedora-coreos-${var.os_stream}" 6 | } 7 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | # Outputs for global load balancing 2 | 3 | output "instance_group" { 4 | description = "Worker managed instance group full URL" 5 | value = google_compute_region_instance_group_manager.workers.instance_group 6 | } 7 | 8 | # Outputs for regional load balancing 9 | 10 | output "target_pool" { 11 | description = "Worker target pool self link" 12 | value = google_compute_target_pool.workers.self_link 13 | } 14 | 15 | -------------------------------------------------------------------------------- 
/google-cloud/fedora-coreos/kubernetes/workers/target_pool.tf: -------------------------------------------------------------------------------- 1 | # Target pool for TCP/UDP load balancing 2 | resource "google_compute_target_pool" "workers" { 3 | name = "${var.name}-worker-pool" 4 | region = var.region 5 | session_affinity = "NONE" 6 | 7 | health_checks = [ 8 | google_compute_http_health_check.workers.name, 9 | ] 10 | } 11 | 12 | # HTTP Health Check (for TCP/UDP load balancing) 13 | # Forward rules (regional) to target pools don't support different external 14 | # and internal ports. Health check for nodes with Ingress controllers that 15 | # may support proxying or otherwise satisfy the check. 16 | resource "google_compute_http_health_check" "workers" { 17 | name = "${var.name}-target-pool-health" 18 | description = "Health check for the worker target pool" 19 | 20 | port = 10254 21 | request_path = "/healthz" 22 | } 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | google = ">= 2.19" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 
11 | 12 | ## Features 13 | 14 | * Kubernetes v1.33.1 (upstream) 15 | * Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/flatcar-linux/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/flatcar-linux/google-cloud/). 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=2c7e6272016a0bb7fb0ba0fb74b5de5753fe934e" 4 | 5 | cluster_name = var.cluster_name 6 | etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")] 7 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 8 | service_account_issuer = var.service_account_issuer 9 | networking = var.networking 10 | pod_cidr = var.pod_cidr 11 | service_cidr = var.service_cidr 12 | daemonset_tolerations = var.daemonset_tolerations 13 | components = var.components 14 | 15 | // temporary 16 | external_apiserver_port = 443 17 | } 18 | 19 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/image.tf: -------------------------------------------------------------------------------- 1 | # Flatcar Linux most recent image from channel 2 | data "google_compute_image" "flatcar-linux" { 3 | project = "kinvolk-public" 4 | family = var.os_image 5 | } 6 | 7 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | sensitive = true 4 | } 5 | 6 | # Outputs for Kubernetes Ingress 7 | 8 | output "ingress_static_ipv4" { 9 | description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller" 10 | value = google_compute_global_address.ingress-ipv4.address 11 | } 12 | 13 | output "ingress_static_ipv6" { 14 | description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller" 15 | value = google_compute_global_address.ingress-ipv6.address 16 | } 17 | 18 | # Outputs for worker pools 19 | 20 | output "network_name" { 21 | value = google_compute_network.network.name 22 | } 23 | 24 | output "kubeconfig" { 25 | value = module.bootstrap.kubeconfig-kubelet 26 | sensitive = true 27 | } 28 | 29 | # Outputs for custom firewalling 30 | 31 | output "network_self_link" { 32 | value = google_compute_network.network.self_link 33 | } 34 | 35 | # Outputs for custom load balancing 36 | 37 | output "worker_instance_group" { 38 | description = "Worker managed 
instance group full URL" 39 | value = module.workers.instance_group 40 | } 41 | 42 | output "worker_target_pool" { 43 | description = "Worker target pool self link" 44 | value = module.workers.target_pool 45 | } 46 | 47 | # Outputs for debug 48 | 49 | output "assets_dist" { 50 | value = module.bootstrap.assets_dist 51 | sensitive = true 52 | } 53 | 54 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = local.controllers_ipv4_public[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "/home/core/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | google_dns_record_set.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = local.controllers_ipv4_public[0] 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | google = ">= 2.19" 7 | null = ">= 2.1" 8 | ct = { 9 | source = "poseidon/ct" 10 | version = "~> 0.13" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | cluster_name = var.cluster_name 5 | 6 | # GCE 7 | region = var.region 8 | network = google_compute_network.network.name 9 | worker_count = var.worker_count 10 | machine_type = var.worker_type 11 | os_image = var.os_image 12 | disk_size = var.worker_disk_size 13 | disk_type = var.worker_disk_type 14 | preemptible = var.worker_preemptible 15 | 16 | # configuration 17 | kubeconfig = module.bootstrap.kubeconfig-kubelet 18 | ssh_authorized_key = var.ssh_authorized_key 19 | service_cidr = var.service_cidr 20 | snippets = var.worker_snippets 21 | node_labels = var.worker_node_labels 22 | } 23 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/workers/image.tf: -------------------------------------------------------------------------------- 1 | # Flatcar Linux most recent image from channel 2 | data "google_compute_image" "flatcar-linux" { 3 | project 
= "kinvolk-public" 4 | family = var.os_image 5 | } 6 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | # Outputs for global load balancing 2 | 3 | output "instance_group" { 4 | description = "Worker managed instance group full URL" 5 | value = google_compute_region_instance_group_manager.workers.instance_group 6 | } 7 | 8 | # Outputs for regional load balancing 9 | 10 | output "target_pool" { 11 | description = "Worker target pool self link" 12 | value = google_compute_target_pool.workers.self_link 13 | } 14 | 15 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/workers/target_pool.tf: -------------------------------------------------------------------------------- 1 | # Target pool for TCP/UDP load balancing 2 | resource "google_compute_target_pool" "workers" { 3 | name = "${var.name}-worker-pool" 4 | region = var.region 5 | session_affinity = "NONE" 6 | 7 | health_checks = [ 8 | google_compute_http_health_check.workers.name, 9 | ] 10 | } 11 | 12 | # HTTP Health Check (for TCP/UDP load balancing) 13 | # Forward rules (regional) to target pools don't support different external 14 | # and internal ports. Health check for nodes with Ingress controllers that 15 | # may support proxying or otherwise satisfy the check. 16 | resource "google_compute_http_health_check" "workers" { 17 | name = "${var.name}-target-pool-health" 18 | description = "Health check for the worker target pool" 19 | 20 | port = 10254 21 | request_path = "/healthz" 22 | } 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/flatcar-linux/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = ">= 0.13.0, < 2.0.0" 5 | required_providers { 6 | google = ">= 2.19" 7 | ct = { 8 | source = "poseidon/ct" 9 | version = "~> 0.13" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | mkdocs==1.6.1 2 | mkdocs-material==9.6.14 3 | pygments==2.19.1 4 | pymdown-extensions==10.15 5 | -------------------------------------------------------------------------------- /theme/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block extrahead %} 4 | 5 | 6 | 7 | 8 | 9 | 10 | {% endblock %} 11 | --------------------------------------------------------------------------------