├── docs ├── img │ ├── spin.png │ ├── favicon.ico │ ├── typhoon.png │ ├── grafana-etcd.png │ ├── typhoon-logo.png │ ├── prometheus-graph.png │ ├── grafana-usage-node.png │ ├── prometheus-alerts.png │ ├── prometheus-targets.png │ ├── grafana-usage-cluster.png │ ├── grafana-resources-cluster.png │ ├── typhoon-aws-load-balancing.png │ ├── typhoon-gcp-load-balancing.png │ ├── typhoon-azure-load-balancing.png │ └── typhoon-digitalocean-load-balancing.png ├── advanced │ └── overview.md ├── addons │ ├── overview.md │ ├── grafana.md │ └── prometheus.md ├── architecture │ ├── bare-metal.md │ ├── operating-systems.md │ └── azure.md └── topics │ ├── faq.md │ └── performance.md ├── aws ├── container-linux │ └── kubernetes │ │ ├── workers │ │ ├── versions.tf │ │ ├── outputs.tf │ │ ├── ami.tf │ │ ├── ingress.tf │ │ └── workers.tf │ │ ├── versions.tf │ │ ├── bootstrap.tf │ │ ├── workers.tf │ │ ├── LICENSE │ │ ├── ami.tf │ │ ├── ssh.tf │ │ ├── README.md │ │ ├── outputs.tf │ │ ├── network.tf │ │ ├── nlb.tf │ │ └── controllers.tf └── fedora-coreos │ └── kubernetes │ ├── workers │ ├── versions.tf │ ├── outputs.tf │ ├── ami.tf │ ├── ingress.tf │ └── workers.tf │ ├── versions.tf │ ├── ami.tf │ ├── bootstrap.tf │ ├── workers.tf │ ├── LICENSE │ ├── ssh.tf │ ├── README.md │ ├── outputs.tf │ ├── network.tf │ ├── nlb.tf │ └── controllers.tf ├── azure ├── fedora-coreos │ └── kubernetes │ │ ├── workers │ │ ├── versions.tf │ │ ├── variables.tf │ │ └── workers.tf │ │ ├── versions.tf │ │ ├── workers.tf │ │ ├── bootstrap.tf │ │ ├── LICENSE │ │ ├── ssh.tf │ │ ├── outputs.tf │ │ ├── README.md │ │ └── network.tf └── container-linux │ └── kubernetes │ ├── workers │ ├── versions.tf │ └── variables.tf │ ├── versions.tf │ ├── bootstrap.tf │ ├── workers.tf │ ├── LICENSE │ ├── ssh.tf │ ├── README.md │ ├── outputs.tf │ └── network.tf ├── requirements.txt ├── google-cloud ├── container-linux │ └── kubernetes │ │ ├── workers │ │ ├── versions.tf │ │ ├── outputs.tf │ │ ├── target_pool.tf │ │ └── workers.tf │ │ ├── versions.tf │ │ ├── workers.tf │ │ ├── bootstrap.tf │ │ ├── LICENSE │ │ ├── outputs.tf │ │ ├── ssh.tf │ │ └── README.md └── fedora-coreos │ └── kubernetes │ ├── workers │ ├── versions.tf │ ├── image.tf │ ├── outputs.tf │ ├── target_pool.tf │ └── workers.tf │ ├── image.tf │ ├── versions.tf │ ├── workers.tf │ ├── bootstrap.tf │ ├── LICENSE │ ├── outputs.tf │ ├── ssh.tf │ └── README.md ├── bare-metal ├── fedora-coreos │ └── kubernetes │ │ ├── outputs.tf │ │ ├── versions.tf │ │ ├── groups.tf │ │ ├── bootstrap.tf │ │ ├── LICENSE │ │ ├── README.md │ │ └── ssh.tf └── container-linux │ └── kubernetes │ ├── outputs.tf │ ├── versions.tf │ ├── bootstrap.tf │ ├── LICENSE │ ├── groups.tf │ ├── README.md │ └── cl │ └── install.yaml ├── addons ├── prometheus │ ├── service-account.yaml │ ├── 0-namespace.yaml │ ├── exporters │ │ ├── node-exporter │ │ │ ├── service-account.yaml │ │ │ ├── service.yaml │ │ │ └── daemonset.yaml │ │ └── kube-state-metrics │ │ │ ├── service-account.yaml │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── service.yaml │ │ │ ├── deployment.yaml │ │ │ └── cluster-role.yaml │ ├── rbac │ │ ├── cluster-role-binding.yaml │ │ └── cluster-role.yaml │ ├── service.yaml │ ├── discovery │ │ ├── kube-scheduler.yaml │ │ ├── kube-proxy.yaml │ │ └── kube-controller-manager.yaml │ ├── network-policy.yaml │ └── deployment.yaml ├── nginx-ingress │ ├── aws │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── role-binding.yaml │ │ │ ├── role.yaml │ │ │ └── cluster-role.yaml │ │ ├── 
service.yaml │ │ └── deployment.yaml │ ├── azure │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── role-binding.yaml │ │ │ ├── role.yaml │ │ │ └── cluster-role.yaml │ │ ├── service.yaml │ │ └── deployment.yaml │ ├── bare-metal │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── role-binding.yaml │ │ │ ├── role.yaml │ │ │ └── cluster-role.yaml │ │ ├── service.yaml │ │ └── deployment.yaml │ ├── digital-ocean │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── rbac │ │ │ ├── cluster-role-binding.yaml │ │ │ ├── role-binding.yaml │ │ │ ├── role.yaml │ │ │ └── cluster-role.yaml │ │ ├── service.yaml │ │ └── daemonset.yaml │ └── google-cloud │ │ ├── 0-namespace.yaml │ │ ├── class.yaml │ │ ├── rbac │ │ ├── cluster-role-binding.yaml │ │ ├── role-binding.yaml │ │ ├── role.yaml │ │ └── cluster-role.yaml │ │ ├── service.yaml │ │ └── deployment.yaml └── grafana │ ├── providers.yaml │ ├── service.yaml │ ├── datasources.yaml │ └── config.yaml ├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ └── bug_report.md ├── PULL_REQUEST_TEMPLATE.md └── issue_template.md ├── digital-ocean ├── container-linux │ └── kubernetes │ │ ├── versions.tf │ │ ├── bootstrap.tf │ │ ├── LICENSE │ │ ├── outputs.tf │ │ ├── README.md │ │ ├── workers.tf │ │ └── ssh.tf └── fedora-coreos │ └── kubernetes │ ├── versions.tf │ ├── bootstrap.tf │ ├── LICENSE │ ├── outputs.tf │ ├── README.md │ ├── workers.tf │ └── ssh.tf ├── CONTRIBUTING.md ├── LICENSE ├── DCO └── mkdocs.yml /docs/img/spin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/spin.png -------------------------------------------------------------------------------- /docs/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/favicon.ico -------------------------------------------------------------------------------- /docs/img/typhoon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/typhoon.png -------------------------------------------------------------------------------- /docs/img/grafana-etcd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/grafana-etcd.png -------------------------------------------------------------------------------- /docs/img/typhoon-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/typhoon-logo.png -------------------------------------------------------------------------------- /docs/img/prometheus-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/prometheus-graph.png -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | 
terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /docs/img/grafana-usage-node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/grafana-usage-node.png -------------------------------------------------------------------------------- /docs/img/prometheus-alerts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/prometheus-alerts.png -------------------------------------------------------------------------------- /docs/img/prometheus-targets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/prometheus-targets.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | mkdocs==1.1.2 2 | mkdocs-material==5.3.3 3 | pygments==2.6.1 4 | pymdown-extensions==7.1.0 5 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /docs/img/grafana-usage-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/grafana-usage-cluster.png -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /docs/img/grafana-resources-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/grafana-resources-cluster.png -------------------------------------------------------------------------------- /docs/img/typhoon-aws-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/typhoon-aws-load-balancing.png -------------------------------------------------------------------------------- /docs/img/typhoon-gcp-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/typhoon-gcp-load-balancing.png 
-------------------------------------------------------------------------------- /docs/img/typhoon-azure-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/typhoon-azure-load-balancing.png -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | -------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | -------------------------------------------------------------------------------- /addons/prometheus/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /docs/img/typhoon-digitalocean-load-balancing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zen/typhoon/master/docs/img/typhoon-digitalocean-load-balancing.png -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/prometheus/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | labels: 6 | name: monitoring 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/0-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress 5 | labels: 6 | name: ingress 7 | -------------------------------------------------------------------------------- 
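The `0-namespace.yaml` manifests above are identical across platforms; the `0-` prefix makes them sort first lexically, so the namespace exists before the resources placed in it. A sketch of the typical apply flow (assuming `kubectl` is already configured against a cluster; the path is one of the addon directories from the tree above):

```
# Apply an addon directory recursively; 0-namespace.yaml sorts first,
# so the ingress namespace is created before the resources inside it.
kubectl apply -R -f addons/nginx-ingress/aws/

# Confirm the namespace and its label
kubectl get namespace ingress --show-labels
```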
/addons/prometheus/exporters/node-exporter/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/ingress-nginx 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/ingress-nginx 7 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/ingress-nginx 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/ingress-nginx 7 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: IngressClass 3 | metadata: 4 | name: public 5 | spec: 6 | controller: k8s.io/ingress-nginx 7 | -------------------------------------------------------------------------------- /docs/advanced/overview.md: -------------------------------------------------------------------------------- 1 | # Advanced 2 | 3 | Typhoon clusters offer several advanced features for skilled users. 4 | 5 | * [Customization](customization.md) 6 | * [Worker Pools](worker-pools.md) 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Security 4 | url: https://typhoon.psdn.io/topics/security/ 5 | about: Report security vulnerabilities 6 | -------------------------------------------------------------------------------- /docs/addons/overview.md: -------------------------------------------------------------------------------- 1 | # Addons 2 | 3 | Every Typhoon cluster is verified to work well with several post-install addons. 
4 | 5 | * Nginx [Ingress Controller](ingress.md) 6 | * [Prometheus](prometheus.md) 7 | * [Grafana](grafana.md) 8 | 9 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/image.tf: -------------------------------------------------------------------------------- 1 | 2 | # Fedora CoreOS most recent image from stream 3 | data "google_compute_image" "fedora-coreos" { 4 | project = "fedora-coreos-cloud" 5 | family = "fedora-coreos-${var.os_stream}" 6 | } 7 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/image.tf: -------------------------------------------------------------------------------- 1 | 2 | # Fedora CoreOS most recent image from stream 3 | data "google_compute_image" "fedora-coreos" { 4 | project = "fedora-coreos-cloud" 5 | family = "fedora-coreos-${var.os_stream}" 6 | } 7 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | High level description of the change. 2 | 3 | * Specific change 4 | * Specific change 5 | 6 | ## Testing 7 | 8 | Describe your work to validate the change works. 9 | 10 | rel: issue number (if applicable) 11 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | aws = "~> 2.23" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | aws = "~> 2.23" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | azurerm = "~> 2.8" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | azurerm = "~> 2.8" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | matchbox = "~> 0.3.0" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | 
-------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | matchbox = "~> 0.3.0" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | google = ">= 2.19, < 4.0" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | google = ">= 2.19, < 4.0" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | digitalocean = "~> 1.16" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version and plugin versions 2 | 3 | terraform { 4 | required_version = "~> 0.12.6" 5 | required_providers { 6 | digitalocean = "~> 1.16" 7 | ct = "~> 0.4" 8 | template = "~> 2.1" 9 | null = "~> 2.1" 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Developer Certificate of Origin 4 | 5 | By contributing, you agree to the Linux Foundation's Developer Certificate of Origin ([DCO](DCO)). The DCO is a statement that you, the contributor, have the legal right to make your contribution and understand the contribution will be distributed as part of this project. 
6 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/prometheus/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | 
namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: ingress 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: ingress 9 | subjects: 10 | - kind: ServiceAccount 11 | namespace: ingress 12 | name: default 13 | -------------------------------------------------------------------------------- /addons/grafana/providers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-providers 5 | namespace: monitoring 6 | data: 7 | providers.yaml: |+ 8 | apiVersion: 1 9 | providers: 10 | - name: 'default' 11 | orgId: 1 12 | folder: '' 13 | type: file 14 | options: 15 | path: /etc/grafana/dashboards 16 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: ingress 10 | subjects: 11 | - kind: ServiceAccount 12 | namespace: ingress 13 | name: default 14 | -------------------------------------------------------------------------------- /addons/prometheus/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - nodes 9 | - nodes/metrics 10 | - services 11 | - endpoints 12 | - pods 13 | verbs: ["get", "list", "watch"] 14 | - nonResourceURLs: ["/metrics"] 15 | verbs: ["get"] 16 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | output "target_group_http" { 2 | description = "ARN of a target group of workers for HTTP traffic" 3 | value = aws_lb_target_group.workers-http.arn 4 | } 5 | 6 | output "target_group_https" { 7 |
description = "ARN of a target group of workers for HTTPS traffic" 8 | value = aws_lb_target_group.workers-https.arn 9 | } 10 | 11 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | output "target_group_http" { 2 | description = "ARN of a target group of workers for HTTP traffic" 3 | value = aws_lb_target_group.workers-http.arn 4 | } 5 | 6 | output "target_group_https" { 7 | description = "ARN of a target group of workers for HTTPS traffic" 8 | value = aws_lb_target_group.workers-https.arn 9 | } 10 | 11 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kube-state-metrics 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kube-state-metrics 9 | subjects: 10 | - kind: ServiceAccount 11 | name: kube-state-metrics 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /addons/grafana/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '8080' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: grafana 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 8080 19 | -------------------------------------------------------------------------------- /addons/prometheus/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '9090' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: prometheus 13 | phase: prod 14 | ports: 15 | - name: web 16 | protocol: TCP 17 | port: 80 18 | targetPort: 9090 19 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/ami.tf: -------------------------------------------------------------------------------- 1 | 2 | data "aws_ami" "fedora-coreos" { 3 | most_recent = true 4 | owners = ["125523088429"] 5 | 6 | filter { 7 | name = "architecture" 8 | values = ["x86_64"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | 16 | filter { 17 | name = "description" 18 | values = ["Fedora CoreOS ${var.os_stream} *"] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/ami.tf: -------------------------------------------------------------------------------- 1 | 2 | data "aws_ami" "fedora-coreos" { 3 | most_recent = true 4 | owners = ["125523088429"] 5 | 6 | filter { 7 | name = "architecture" 8 | values = ["x86_64"] 9 | } 10 | 11 | filter { 12 | name = "virtualization-type" 13 | values = ["hvm"] 14 | } 15 | 16 | filter { 17 | name = "description" 18 | values = ["Fedora CoreOS ${var.os_stream} *"] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- 
/addons/prometheus/discovery/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | # Allow Prometheus to scrape service endpoints 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-scheduler 6 | namespace: kube-system 7 | annotations: 8 | prometheus.io/scrape: 'true' 9 | spec: 10 | type: ClusterIP 11 | clusterIP: None 12 | selector: 13 | k8s-app: kube-scheduler 14 | ports: 15 | - name: metrics 16 | protocol: TCP 17 | port: 10251 18 | targetPort: 10251 19 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | # Outputs for global load balancing 2 | 3 | output "instance_group" { 4 | description = "Worker managed instance group full URL" 5 | value = google_compute_region_instance_group_manager.workers.instance_group 6 | } 7 | 8 | # Outputs for regional load balancing 9 | 10 | output "target_pool" { 11 | description = "Worker target pool self link" 12 | value = google_compute_target_pool.workers.self_link 13 | } 14 | 15 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/workers/outputs.tf: -------------------------------------------------------------------------------- 1 | # Outputs for global load balancing 2 | 3 | output "instance_group" { 4 | description = "Worker managed instance group full URL" 5 | value = google_compute_region_instance_group_manager.workers.instance_group 6 | } 7 | 8 | # Outputs for regional load balancing 9 | 10 | output "target_pool" { 11 | description = "Worker target pool self link" 12 | value = google_compute_target_pool.workers.self_link 13 | } 14 | 15 | -------------------------------------------------------------------------------- /addons/prometheus/discovery/kube-proxy.yaml: -------------------------------------------------------------------------------- 1 | # Allow Prometheus to scrape service endpoints 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-proxy 6 | namespace: kube-system 7 | annotations: 8 | prometheus.io/scrape: 'true' 9 | prometheus.io/port: '10249' 10 | spec: 11 | type: ClusterIP 12 | clusterIP: None 13 | selector: 14 | k8s-app: kube-proxy 15 | ports: 16 | - name: metrics 17 | protocol: TCP 18 | port: 10249 19 | targetPort: 10249 20 | -------------------------------------------------------------------------------- /addons/prometheus/discovery/kube-controller-manager.yaml: -------------------------------------------------------------------------------- 1 | # Allow Prometheus to scrape service endpoints 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-controller-manager 6 | namespace: kube-system 7 | annotations: 8 | prometheus.io/scrape: 'true' 9 | spec: 10 | type: ClusterIP 11 | clusterIP: None 12 | selector: 13 | k8s-app: kube-controller-manager 14 | ports: 15 | - name: metrics 16 | protocol: TCP 17 | port: 10252 18 | targetPort: 10252 19 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/node-exporter/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | spec: 9 | type: ClusterIP 10 | # service is created to allow Prometheus to scrape endpoints
11 | clusterIP: None 12 | selector: 13 | name: node-exporter 14 | phase: prod 15 | ports: 16 | - name: metrics 17 | protocol: TCP 18 | port: 80 19 | targetPort: 9100 20 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | spec: 9 | type: ClusterIP 10 | # service is created to allow Prometheus to scrape endpoints 11 | clusterIP: None 12 | selector: 13 | name: kube-state-metrics 14 | phase: prod 15 | ports: 16 | - name: metrics 17 | protocol: TCP 18 | port: 8080 19 | targetPort: 8080 20 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | selector: 12 | name: nginx-ingress-controller 13 | phase: prod 14 | ports: 15 | - name: http 16 | protocol: TCP 17 | port: 80 18 | targetPort: 80 19 | - name: https 20 | protocol: TCP 21 | port: 443 22 | targetPort: 443 23 | --------------------------------------------------------------------------------
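Each service above carries `prometheus.io/scrape` (and sometimes `prometheus.io/port`) annotations. These are not built into Kubernetes; they only take effect because the Prometheus scrape configuration (in `addons/prometheus/deployment.yaml`, listed in the tree but not shown in this excerpt) discovers endpoints and filters on them. The conventional relabeling looks roughly like this (a sketch based on the standard Prometheus Kubernetes example config, not a verbatim copy of this repo's config):

```
- job_name: 'kubernetes-service-endpoints'
  kubernetes_sd_configs:
    - role: endpoints
  relabel_configs:
    # keep only endpoints whose service sets prometheus.io/scrape: 'true'
    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
      action: keep
      regex: true
    # honor prometheus.io/port by rewriting the scrape address
    - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
      action: replace
      regex: ([^:]+)(?::\d+)?;(\d+)
      replacement: $1:$2
      target_label: __address__
```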
/addons/nginx-ingress/bare-metal/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-controller-public 5 | namespace: ingress 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/port: '10254' 9 | spec: 10 | type: ClusterIP 11 | clusterIP: 10.3.0.12 12 | selector: 13 | name: ingress-controller-public 14 | phase: prod 15 | ports: 16 | - name: http 17 | protocol: TCP 18 | port: 80 19 | targetPort: 80 20 | - name: https 21 | protocol: TCP 22 | port: 443 23 | targetPort: 443 24 | -------------------------------------------------------------------------------- /.github/issue_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Enhancement 4 | 5 | ### Overview 6 | 7 | One paragraph explanation of the enhancement. 8 | 9 | ### Motivation 10 | 11 | Describe the motivation and what problem this solves. 12 | 13 | ### Tradeoffs 14 | 15 | What are the pros and cons of this feature? How will it be exercised and maintained? 16 | -------------------------------------------------------------------------------- /addons/grafana/datasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-datasources 5 | namespace: monitoring 6 | data: 7 | prometheus.yaml: |+ 8 | apiVersion: 1 9 | datasources: 10 | - name: prometheus 11 | type: prometheus 12 | access: proxy 13 | url: http://prometheus.monitoring.svc.cluster.local 14 | version: 1 15 | editable: false 16 | loki.yaml: |+ 17 | apiVersion: 1 18 | datasources: 19 | - name: loki 20 | type: loki 21 | access: proxy 22 | url: http://loki.monitoring.svc.cluster.local 23 | version: 1 24 | editable: false 25 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/groups.tf: -------------------------------------------------------------------------------- 1 | # Match each controller or worker to a profile 2 | 3 | resource "matchbox_group" "controller" { 4 | count = length(var.controllers) 5 | name = format("%s-%s", var.cluster_name, var.controllers.*.name[count.index]) 6 | profile = matchbox_profile.controllers.*.name[count.index] 7 | 8 | selector = { 9 | mac = var.controllers.*.mac[count.index] 10 | } 11 | } 12 | 13 | resource "matchbox_group" "worker" { 14 | count = length(var.workers) 15 | name = format("%s-%s", var.cluster_name, var.workers.*.name[count.index]) 16 | profile = matchbox_profile.workers.*.name[count.index] 17 | 18 | selector = { 19 | mac = var.workers.*.mac[count.index] 20 | } 21 | } 22 | 23 | -------------------------------------------------------------------------------- /docs/addons/grafana.md: -------------------------------------------------------------------------------- 1 | ## Grafana 2 | 3 | Grafana can be used to build dashboards and visualizations that use Prometheus as the data source. Create the Grafana deployment and service. 4 | 5 | ``` 6 | kubectl apply -f addons/grafana -R 7 | ``` 8 | 9 | Use `kubectl` to authenticate to the apiserver and create a local port-forward to the Grafana pod. 10 | 11 | ``` 12 | kubectl port-forward grafana-POD-ID 8080 -n monitoring 13 | ``` 14 | 15 | Visit [127.0.0.1:8080](http://127.0.0.1:8080) to view the bundled dashboards.
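`grafana-POD-ID` is a placeholder for the generated pod name. One way to resolve it automatically (a sketch; it assumes the Grafana pods carry the `name: grafana` label that the service selector in `addons/grafana/service.yaml` uses):

```
POD=$(kubectl get pod -n monitoring -l name=grafana -o jsonpath='{.items[0].metadata.name}')
kubectl port-forward $POD 8080 -n monitoring
```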
16 | 17 | ![Grafana etcd](../img/grafana-etcd.png) 18 | ![Grafana resources cluster](../img/grafana-resources-cluster.png) 19 | ![Grafana usage cluster](../img/grafana-usage-cluster.png) 20 | ![Grafana usage node](../img/grafana-usage-node.png) 21 | 22 | -------------------------------------------------------------------------------- /addons/prometheus/network-policy.yaml: -------------------------------------------------------------------------------- 1 | # Allow Grafana access and in-cluster Prometheus scraping 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: prometheus 6 | namespace: monitoring 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | name: prometheus 11 | ingress: 12 | - ports: 13 | - protocol: TCP 14 | port: 9090 15 | from: 16 | - namespaceSelector: 17 | matchLabels: 18 | name: monitoring 19 | podSelector: 20 | matchLabels: 21 | name: grafana 22 | - namespaceSelector: 23 | matchLabels: 24 | name: monitoring 25 | podSelector: 26 | matchLabels: 27 | name: prometheus 28 | 29 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = aws_route53_record.etcds.*.fqdn 8 | asset_dir = var.asset_dir 9 | networking = var.networking 10 | network_mtu = var.network_mtu 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | cluster_domain_suffix = var.cluster_domain_suffix 14 | enable_reporting = var.enable_reporting 15 | enable_aggregation = var.enable_aggregation 16 | } 17 | 18 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | cluster_name = var.cluster_name 5 | 6 | # GCE 7 | region = var.region 8 | network = google_compute_network.network.name 9 | worker_count = var.worker_count 10 | machine_type = var.worker_type 11 | os_image = var.os_image 12 | disk_size = var.disk_size 13 | preemptible = var.worker_preemptible 14 | 15 | # configuration 16 | kubeconfig = module.bootstrap.kubeconfig-kubelet 17 | ssh_authorized_key = var.ssh_authorized_key 18 | service_cidr = var.service_cidr 19 | cluster_domain_suffix = var.cluster_domain_suffix 20 | snippets = var.worker_snippets 21 | node_labels = var.worker_node_labels 22 | } 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | cluster_name = var.cluster_name 5 | 6 | # GCE 7 | region = var.region 8 | network = google_compute_network.network.name 9 | worker_count = var.worker_count 10 | machine_type = var.worker_type 11 | os_stream = var.os_stream 12 | disk_size = var.disk_size 13 | preemptible = var.worker_preemptible 14 | 15 | # configuration 16 | kubeconfig = module.bootstrap.kubeconfig-kubelet 17 | ssh_authorized_key = var.ssh_authorized_key 
18 | service_cidr = var.service_cidr 19 | cluster_domain_suffix = var.cluster_domain_suffix 20 | snippets = var.worker_snippets 21 | node_labels = var.worker_node_labels 22 | } 23 | 24 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = aws_route53_record.etcds.*.fqdn 8 | asset_dir = var.asset_dir 9 | networking = var.networking 10 | network_mtu = var.network_mtu 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | cluster_domain_suffix = var.cluster_domain_suffix 14 | enable_reporting = var.enable_reporting 15 | enable_aggregation = var.enable_aggregation 16 | 17 | trusted_certs_dir = "/etc/pki/tls/certs" 18 | } 19 | 20 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = digitalocean_record.etcds.*.fqdn 8 | asset_dir = var.asset_dir 9 | 10 | networking = var.networking 11 | 12 | # only effective with Calico networking 13 | network_encapsulation = "vxlan" 14 | network_mtu = "1450" 15 | 16 | pod_cidr = var.pod_cidr 17 | service_cidr = var.service_cidr 18 | cluster_domain_suffix = var.cluster_domain_suffix 19 | enable_reporting = var.enable_reporting 20 | enable_aggregation = var.enable_aggregation 21 | } 22 | 23 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # AWS 6 | vpc_id = aws_vpc.network.id 7 | subnet_ids = aws_subnet.public.*.id 8 | security_groups = [aws_security_group.worker.id] 9 | worker_count = var.worker_count 10 | instance_type = var.worker_type 11 | os_stream = var.os_stream 12 | disk_size = var.disk_size 13 | spot_price = var.worker_price 14 | target_groups = var.worker_target_groups 15 | 16 | # configuration 17 | kubeconfig = module.bootstrap.kubeconfig-kubelet 18 | ssh_authorized_key = var.ssh_authorized_key 19 | service_cidr = var.service_cidr 20 | cluster_domain_suffix = var.cluster_domain_suffix 21 | snippets = var.worker_snippets 22 | node_labels = var.worker_node_labels 23 | } 24 | 25 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = 
[format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = google_dns_record_set.etcds.*.name 8 | asset_dir = var.asset_dir 9 | networking = var.networking 10 | network_mtu = 1440 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | cluster_domain_suffix = var.cluster_domain_suffix 14 | enable_reporting = var.enable_reporting 15 | enable_aggregation = var.enable_aggregation 16 | 17 | // temporary 18 | external_apiserver_port = 443 19 | } 20 | 21 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # AWS 6 | vpc_id = aws_vpc.network.id 7 | subnet_ids = aws_subnet.public.*.id 8 | security_groups = [aws_security_group.worker.id] 9 | worker_count = var.worker_count 10 | instance_type = var.worker_type 11 | os_image = var.os_image 12 | disk_size = var.disk_size 13 | spot_price = var.worker_price 14 | target_groups = var.worker_target_groups 15 | 16 | # configuration 17 | kubeconfig = module.bootstrap.kubeconfig-kubelet 18 | ssh_authorized_key = var.ssh_authorized_key 19 | service_cidr = var.service_cidr 20 | cluster_domain_suffix = var.cluster_domain_suffix 21 | snippets = var.worker_snippets 22 | node_labels = var.worker_node_labels 23 | } 24 | 25 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/workers/target_pool.tf: -------------------------------------------------------------------------------- 1 | # Target pool for TCP/UDP load balancing 2 | resource "google_compute_target_pool" "workers" { 3 | name = "${var.name}-worker-pool" 4 | region = var.region 5 | session_affinity = "NONE" 6 | 7 | health_checks = [ 8 | google_compute_http_health_check.workers.name, 9 | ] 10 | } 11 | 12 | # HTTP Health Check (for TCP/UDP load balancing) 13 | # Forward rules (regional) to target pools don't support different external 14 | # and internal ports. Health check for nodes with Ingress controllers that 15 | # may support proxying or otherwise satisfy the check. 16 | resource "google_compute_http_health_check" "workers" { 17 | name = "${var.name}-target-pool-health" 18 | description = "Health check for the worker target pool" 19 | 20 | port = 10254 21 | request_path = "/healthz" 22 | } 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/target_pool.tf: -------------------------------------------------------------------------------- 1 | # Target pool for TCP/UDP load balancing 2 | resource "google_compute_target_pool" "workers" { 3 | name = "${var.name}-worker-pool" 4 | region = var.region 5 | session_affinity = "NONE" 6 | 7 | health_checks = [ 8 | google_compute_http_health_check.workers.name, 9 | ] 10 | } 11 | 12 | # HTTP Health Check (for TCP/UDP load balancing) 13 | # Forward rules (regional) to target pools don't support different external 14 | # and internal ports. Health check for nodes with Ingress controllers that 15 | # may support proxying or otherwise satisfy the check. 
16 | resource "google_compute_http_health_check" "workers" { 17 | name = "${var.name}-target-pool-health" 18 | description = "Health check for the worker target pool" 19 | 20 | port = 10254 21 | request_path = "/healthz" 22 | } 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = google_dns_record_set.etcds.*.name 8 | asset_dir = var.asset_dir 9 | networking = var.networking 10 | network_mtu = 1440 11 | pod_cidr = var.pod_cidr 12 | service_cidr = var.service_cidr 13 | cluster_domain_suffix = var.cluster_domain_suffix 14 | enable_reporting = var.enable_reporting 15 | enable_aggregation = var.enable_aggregation 16 | 17 | trusted_certs_dir = "/etc/pki/tls/certs" 18 | 19 | // temporary 20 | external_apiserver_port = 443 21 | } 22 | 23 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = digitalocean_record.etcds.*.fqdn 8 | asset_dir = var.asset_dir 9 | 10 | networking = var.networking 11 | 12 | # only effective with Calico networking 13 | network_encapsulation = "vxlan" 14 | network_mtu = "1450" 15 | 16 | pod_cidr = var.pod_cidr 17 | service_cidr = var.service_cidr 18 | cluster_domain_suffix = var.cluster_domain_suffix 19 | enable_reporting = var.enable_reporting 20 | enable_aggregation = var.enable_aggregation 21 | 22 | # Fedora CoreOS 23 | trusted_certs_dir = "/etc/pki/tls/certs" 24 | } 25 | 26 | -------------------------------------------------------------------------------- /addons/grafana/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-config 5 | namespace: monitoring 6 | data: 7 | custom.ini: |+ 8 | [server] 9 | http_port = 8080 10 | 11 | [paths] 12 | data = /var/lib/grafana 13 | plugins = /var/lib/grafana/plugins 14 | provisioning = /etc/grafana/provisioning 15 | 16 | [users] 17 | allow_sign_up = false 18 | allow_org_create = false 19 | # viewers can edit/inspect, but not save 20 | viewers_can_edit = true 21 | 22 | # Disable login form, since Grafana always creates an admin user 23 | [auth] 24 | disable_login_form = true 25 | 26 | # Disable the user/pass login system 27 | [auth.basic] 28 | enabled = false 29 | 30 | # Allow anonymous authentication with view-only authorization 31 | [auth.anonymous] 32 | enabled = true 33 | org_role = Viewer 34 | 35 | [analytics] 36 | reporting_enabled = false 37 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | verbs: 14 | - get 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | resourceNames: 20 | # Defaults to "<election-id>-<ingress-class>" 21 | # Here: "<ingress-controller-leader>-<public>" 22 | # This has to be adapted if you change either parameter 23 | # when launching the nginx-ingress-controller. 24 | - "ingress-controller-leader-public" 25 | verbs: 26 | - get 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | verbs: 33 | - create 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - endpoints 38 | verbs: 39 | - get 40 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | verbs: 14 | - get 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | resourceNames: 20 | # Defaults to "<election-id>-<ingress-class>" 21 | # Here: "<ingress-controller-leader>-<public>" 22 | # This has to be adapted if you change either parameter 23 | # when launching the nginx-ingress-controller. 24 | - "ingress-controller-leader-public" 25 | verbs: 26 | - get 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | verbs: 33 | - create 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - endpoints 38 | verbs: 39 | - get 40 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | verbs: 14 | - get 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | resourceNames: 20 | # Defaults to "<election-id>-<ingress-class>" 21 | # Here: "<ingress-controller-leader>-<public>" 22 | # This has to be adapted if you change either parameter 23 | # when launching the nginx-ingress-controller. 24 | - "ingress-controller-leader-public" 25 | verbs: 26 | - get 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | verbs: 33 | - create 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - endpoints 38 | verbs: 39 | - get 40 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | verbs: 14 | - get 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | resourceNames: 20 | # Defaults to "<election-id>-<ingress-class>" 21 | # Here: "<ingress-controller-leader>-<public>" 22 | # This has to be adapted if you change either parameter 23 | # when launching the nginx-ingress-controller.
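# (Illustrative note: with the controller's usual flags, e.g.
# --election-id=ingress-controller-leader and --ingress-class=public,
# leader election uses the configmap named below; change either flag and
# this resourceName must change with it.)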
24 | - "ingress-controller-leader-public" 25 | verbs: 26 | - get 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | verbs: 33 | - create 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - endpoints 38 | verbs: 39 | - get 40 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: ingress 5 | namespace: ingress 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - pods 12 | - secrets 13 | verbs: 14 | - get 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | resourceNames: 20 | # Defaults to "-" 21 | # Here: "-" 22 | # This has to be adapted if you change either parameter 23 | # when launching the nginx-ingress-controller. 24 | - "ingress-controller-leader-public" 25 | verbs: 26 | - get 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | verbs: 33 | - create 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - endpoints 38 | verbs: 39 | - get 40 | -------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [var.k8s_domain_name] 7 | etcd_servers = var.controllers.*.domain 8 | asset_dir = var.asset_dir 9 | networking = var.networking 10 | network_mtu = var.network_mtu 11 | network_ip_autodetection_method = var.network_ip_autodetection_method 12 | pod_cidr = var.pod_cidr 13 | service_cidr = var.service_cidr 14 | cluster_domain_suffix = var.cluster_domain_suffix 15 | enable_reporting = var.enable_reporting 16 | enable_aggregation = var.enable_aggregation 17 | } 18 | 19 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone) 8 | asset_dir = var.asset_dir 9 | 10 | networking = var.networking 11 | 12 | # only effective with Calico networking 13 | # we should be able to use 1450 MTU, but in practice, 1410 was needed 14 | network_encapsulation = "vxlan" 15 | network_mtu = "1410" 16 | 17 | pod_cidr = var.pod_cidr 18 | service_cidr = var.service_cidr 19 | cluster_domain_suffix = var.cluster_domain_suffix 20 | enable_reporting = var.enable_reporting 21 | enable_aggregation = var.enable_aggregation 22 | } 23 | 24 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # Azure 6 | resource_group_name 
= azurerm_resource_group.cluster.name 7 | region = azurerm_resource_group.cluster.location 8 | subnet_id = azurerm_subnet.worker.id 9 | security_group_id = azurerm_network_security_group.worker.id 10 | backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id 11 | 12 | worker_count = var.worker_count 13 | vm_type = var.worker_type 14 | os_image = var.os_image 15 | priority = var.worker_priority 16 | 17 | # configuration 18 | kubeconfig = module.bootstrap.kubeconfig-kubelet 19 | ssh_authorized_key = var.ssh_authorized_key 20 | service_cidr = var.service_cidr 21 | cluster_domain_suffix = var.cluster_domain_suffix 22 | snippets = var.worker_snippets 23 | node_labels = var.worker_node_labels 24 | } 25 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | module "workers" { 2 | source = "./workers" 3 | name = var.cluster_name 4 | 5 | # Azure 6 | resource_group_name = azurerm_resource_group.cluster.name 7 | region = azurerm_resource_group.cluster.location 8 | subnet_id = azurerm_subnet.worker.id 9 | security_group_id = azurerm_network_security_group.worker.id 10 | backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id 11 | 12 | worker_count = var.worker_count 13 | vm_type = var.worker_type 14 | os_image = var.os_image 15 | priority = var.worker_priority 16 | 17 | # configuration 18 | kubeconfig = module.bootstrap.kubeconfig-kubelet 19 | ssh_authorized_key = var.ssh_authorized_key 20 | service_cidr = var.service_cidr 21 | cluster_domain_suffix = var.cluster_domain_suffix 22 | snippets = var.worker_snippets 23 | node_labels = var.worker_node_labels 24 | } 25 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [var.k8s_domain_name] 7 | etcd_servers = var.controllers.*.domain 8 | asset_dir = var.asset_dir 9 | networking = var.networking 10 | network_mtu = var.network_mtu 11 | network_ip_autodetection_method = var.network_ip_autodetection_method 12 | pod_cidr = var.pod_cidr 13 | service_cidr = var.service_cidr 14 | cluster_domain_suffix = var.cluster_domain_suffix 15 | enable_reporting = var.enable_reporting 16 | enable_aggregation = var.enable_aggregation 17 | 18 | trusted_certs_dir = "/etc/pki/tls/certs" 19 | } 20 | 21 | 22 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/bootstrap.tf: -------------------------------------------------------------------------------- 1 | # Kubernetes assets (kubeconfig, manifests) 2 | module "bootstrap" { 3 | source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=5a7c963caf59740891df2aeae4b1561ccb3b9db6" 4 | 5 | cluster_name = var.cluster_name 6 | api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] 7 | etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone) 8 | asset_dir = var.asset_dir 9 | 10 | networking = var.networking 11 | 12 | # only effective with Calico networking 13 | # we should be able to use 1450 MTU, but in practice, 1410 was needed 14 | 
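# (Worked example of the arithmetic, for illustration only: a 1500 byte
# VNet MTU minus ~50 bytes of VXLAN overhead suggests 1500 - 50 = 1450,
# but 1410 is set to leave the headroom observed to be necessary.)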
network_encapsulation = "vxlan" 15 | network_mtu = "1410" 16 | 17 | pod_cidr = var.pod_cidr 18 | service_cidr = var.service_cidr 19 | cluster_domain_suffix = var.cluster_domain_suffix 20 | enable_reporting = var.enable_reporting 21 | enable_aggregation = var.enable_aggregation 22 | 23 | # Fedora CoreOS 24 | trusted_certs_dir = "/etc/pki/tls/certs" 25 | } 26 | 27 | -------------------------------------------------------------------------------- /docs/architecture/bare-metal.md: -------------------------------------------------------------------------------- 1 | # Bare-Metal 2 | 3 | ## Load Balancing 4 | 5 | ### kube-apiserver 6 | 7 | Load balancing across controller nodes with a healthy `kube-apiserver` is determined by your unique bare-metal environment and its capabilities. 8 | 9 | ### HTTP/HTTPS Ingress 10 | 11 | Load balancing across worker nodes with a healthy Ingress Controller is determined by your unique bare-metal environment and its capabilities. 12 | 13 | See the `nginx-ingress` addon to run [Nginx as the Ingress Controller](/addons/ingress/#bare-metal) for bare-metal. 14 | 15 | ### TCP/UDP Services 16 | 17 | Load balancing across worker nodes with TCP/UDP services is determined by your unique bare-metal environment and its capabilities. 18 | 19 | ## IPv6 20 | 21 | Status of IPv6 on Typhoon bare-metal clusters. 22 | 23 | | IPv6 Feature | Supported | 24 | |-------------------------|-----------| 25 | | Node IPv6 address | Yes | 26 | | Node Outbound IPv6 | Yes | 27 | | Kubernetes Ingress IPv6 | Possible | 28 | 29 | IPv6 support depends upon the bare-metal network environment. 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | 24 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: RollingUpdate 10 | rollingUpdate: 11 | maxUnavailable: 1 12 | selector: 13 | matchLabels: 14 | name: kube-state-metrics 15 | phase: prod 16 | template: 17 | metadata: 18 | labels: 19 | name: kube-state-metrics 20 | phase: prod 21 | annotations: 22 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 23 | spec: 24 | serviceAccountName: kube-state-metrics 25 | containers: 26 | - name: kube-state-metrics 27 | image: quay.io/coreos/kube-state-metrics:v1.9.7 28 | ports: 29 | - name: metrics 30 | containerPort: 8080 31 | livenessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | initialDelaySeconds: 5 36 | timeoutSeconds: 5 37 | readinessProbe: 38 | httpGet: 39 | path: / 40 | port: 8081 41 | initialDelaySeconds: 5 42 | timeoutSeconds: 5 43 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Typhoon Authors 4 | Copyright (c) 2020 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | 24 | -------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Typhoon Authors 4 | Copyright (c) 2020 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Typhoon Authors 4 | Copyright (c) 2020 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Typhoon Authors 4 | Copyright (c) 2017 Dalton Hubble 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | 24 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/ami.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Pick a CoreOS Container Linux derivative 3 | # coreos-stable -> Container Linux AMI 4 | # flatcar-stable -> Flatcar Linux AMI 5 | ami_id = local.flavor == "flatcar" ? 
data.aws_ami.flatcar.image_id : data.aws_ami.coreos.image_id 6 | 7 | flavor = split("-", var.os_image)[0] 8 | channel = split("-", var.os_image)[1] 9 | } 10 | 11 | data "aws_ami" "coreos" { 12 | most_recent = true 13 | owners = ["595879546273"] 14 | 15 | filter { 16 | name = "architecture" 17 | values = ["x86_64"] 18 | } 19 | 20 | filter { 21 | name = "virtualization-type" 22 | values = ["hvm"] 23 | } 24 | 25 | filter { 26 | name = "name" 27 | values = ["CoreOS-${local.flavor == "coreos" ? local.channel : "stable"}-*"] 28 | } 29 | } 30 | 31 | data "aws_ami" "flatcar" { 32 | most_recent = true 33 | owners = ["075585003325"] 34 | 35 | filter { 36 | name = "architecture" 37 | values = ["x86_64"] 38 | } 39 | 40 | filter { 41 | name = "virtualization-type" 42 | values = ["hvm"] 43 | } 44 | 45 | filter { 46 | name = "name" 47 | values = ["Flatcar-${local.flavor == "flatcar" ? local.channel : "stable"}-*"] 48 | } 49 | } 50 | 51 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | 63 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/workers/ami.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Pick a CoreOS Container Linux derivative 3 | # coreos-stable -> Container Linux AMI 4 | # flatcar-stable -> Flatcar Linux AMI 5 | ami_id = local.flavor == "flatcar" ? data.aws_ami.flatcar.image_id : data.aws_ami.coreos.image_id 6 | 7 | flavor = split("-", var.os_image)[0] 8 | channel = split("-", var.os_image)[1] 9 | } 10 | 11 | data "aws_ami" "coreos" { 12 | most_recent = true 13 | owners = ["595879546273"] 14 | 15 | filter { 16 | name = "architecture" 17 | values = ["x86_64"] 18 | } 19 | 20 | filter { 21 | name = "virtualization-type" 22 | values = ["hvm"] 23 | } 24 | 25 | filter { 26 | name = "name" 27 | values = ["CoreOS-${local.flavor == "coreos" ? local.channel : "stable"}-*"] 28 | } 29 | } 30 | 31 | data "aws_ami" "flatcar" { 32 | most_recent = true 33 | owners = ["075585003325"] 34 | 35 | filter { 36 | name = "architecture" 37 | values = ["x86_64"] 38 | } 39 | 40 | filter { 41 | name = "virtualization-type" 42 | values = ["hvm"] 43 | } 44 | 45 | filter { 46 | name = "name" 47 | values = ["Flatcar-${local.flavor == "flatcar" ? 
local.channel : "stable"}-*"] 48 | } 49 | } 50 | 51 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | 63 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | 63 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | 
verbs: 59 | - get 60 | - list 61 | - watch 62 | 63 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: ingress 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - nodes 21 | verbs: 22 | - get 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - services 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | - apiGroups: 39 | - "extensions" 40 | - "networking.k8s.io" 41 | resources: 42 | - ingresses 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "extensions" 49 | - "networking.k8s.io" 50 | resources: 51 | - ingresses/status 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "networking.k8s.io" 56 | resources: 57 | - ingressclasses 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | 63 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "ingress_static_ipv4" { 8 | description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller" 9 | value = google_compute_global_address.ingress-ipv4.address 10 | } 11 | 12 | output "ingress_static_ipv6" { 13 | description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller" 14 | value = google_compute_global_address.ingress-ipv6.address 15 | } 16 | 17 | # Outputs for worker pools 18 | 19 | output "network_name" { 20 | value = google_compute_network.network.name 21 | } 22 | 23 | output "kubeconfig" { 24 | value = module.bootstrap.kubeconfig-kubelet 25 | } 26 | 27 | # Outputs for custom firewalling 28 | 29 | output "network_self_link" { 30 | value = google_compute_network.network.self_link 31 | } 32 | 33 | # Outputs for custom load balancing 34 | 35 | output "worker_instance_group" { 36 | description = "Worker managed instance group full URL" 37 | value = module.workers.instance_group 38 | } 39 | 40 | output "worker_target_pool" { 41 | description = "Worker target pool self link" 42 | value = module.workers.target_pool 43 | } 44 | 45 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "ingress_static_ipv4" { 8 | description = "Global IPv4 address for proxy load balancing to the nearest Ingress controller" 9 | value = google_compute_global_address.ingress-ipv4.address 10 | } 11 | 12 | output "ingress_static_ipv6" { 13 | description = "Global IPv6 address for proxy load balancing to the nearest Ingress controller" 14 | value = google_compute_global_address.ingress-ipv6.address 15 | } 16 | 17 | # Outputs for worker pools 18 | 19 | output "network_name" { 20 | value = 
google_compute_network.network.name 21 | } 22 | 23 | output "kubeconfig" { 24 | value = module.bootstrap.kubeconfig-kubelet 25 | } 26 | 27 | # Outputs for custom firewalling 28 | 29 | output "network_self_link" { 30 | value = google_compute_network.network.self_link 31 | } 32 | 33 | # Outputs for custom load balancing 34 | 35 | output "worker_instance_group" { 36 | description = "Worker managed instance group full URL" 37 | value = module.workers.instance_group 38 | } 39 | 40 | output "worker_target_pool" { 41 | description = "Worker target pool self link" 42 | value = module.workers.target_pool 43 | } 44 | 45 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/workers/ingress.tf: -------------------------------------------------------------------------------- 1 | # Target groups of instances for use with load balancers 2 | 3 | resource "aws_lb_target_group" "workers-http" { 4 | name = "${var.name}-workers-http" 5 | vpc_id = var.vpc_id 6 | target_type = "instance" 7 | 8 | protocol = "TCP" 9 | port = 80 10 | 11 | # HTTP health check for ingress 12 | health_check { 13 | protocol = "HTTP" 14 | port = 10254 15 | path = "/healthz" 16 | 17 | # NLBs required to use same healthy and unhealthy thresholds 18 | healthy_threshold = 3 19 | unhealthy_threshold = 3 20 | 21 | # Interval between health checks required to be 10 or 30 22 | interval = 10 23 | } 24 | } 25 | 26 | resource "aws_lb_target_group" "workers-https" { 27 | name = "${var.name}-workers-https" 28 | vpc_id = var.vpc_id 29 | target_type = "instance" 30 | 31 | protocol = "TCP" 32 | port = 443 33 | 34 | # HTTP health check for ingress 35 | health_check { 36 | protocol = "HTTP" 37 | port = 10254 38 | path = "/healthz" 39 | 40 | # NLBs required to use same healthy and unhealthy thresholds 41 | healthy_threshold = 3 42 | unhealthy_threshold = 3 43 | 44 | # Interval between health checks required to be 10 or 30 45 | interval = 10 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/workers/ingress.tf: -------------------------------------------------------------------------------- 1 | # Target groups of instances for use with load balancers 2 | 3 | resource "aws_lb_target_group" "workers-http" { 4 | name = "${var.name}-workers-http" 5 | vpc_id = var.vpc_id 6 | target_type = "instance" 7 | 8 | protocol = "TCP" 9 | port = 80 10 | 11 | # HTTP health check for ingress 12 | health_check { 13 | protocol = "HTTP" 14 | port = 10254 15 | path = "/healthz" 16 | 17 | # NLBs required to use same healthy and unhealthy thresholds 18 | healthy_threshold = 3 19 | unhealthy_threshold = 3 20 | 21 | # Interval between health checks required to be 10 or 30 22 | interval = 10 23 | } 24 | } 25 | 26 | resource "aws_lb_target_group" "workers-https" { 27 | name = "${var.name}-workers-https" 28 | vpc_id = var.vpc_id 29 | target_type = "instance" 30 | 31 | protocol = "TCP" 32 | port = 443 33 | 34 | # HTTP health check for ingress 35 | health_check { 36 | protocol = "HTTP" 37 | port = 10254 38 | path = "/healthz" 39 | 40 | # NLBs required to use same healthy and unhealthy thresholds 41 | healthy_threshold = 3 42 | unhealthy_threshold = 3 43 | 44 | # Interval between health checks required to be 10 or 30 45 | interval = 10 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /docs/topics/faq.md: -------------------------------------------------------------------------------- 1 | # 
FAQ 2 | 3 | ## Terraform 4 | 5 | Typhoon provides a Terraform Module for each supported operating system and platform. Terraform is considered a *format* detail, much like a Linux distro might provide images in the qcow2 or ISO format. It is a mechanism for sharing Typhoon in a way that works for many users. 6 | 7 | Formats rise and evolve. Typhoon may choose to adapt the format over time (with lots of forewarning). However, the authors have built several Kubernetes "distros" before and learned from mistakes - Terraform modules are the right format for now. 8 | 9 | ## Get Help 10 | 11 | Ask questions on the IRC #typhoon channel on [freenode.net](http://freenode.net/). 12 | 13 | ## Security Issues 14 | 15 | If you find security issues, please see [security disclosures](/topics/security.md#disclosures). 16 | 17 | ## Maintainers 18 | 19 | Typhoon clusters are Kubernetes clusters the maintainers run in real-world production. 20 | 21 | * Maintainers must personally operate bare-metal and cloud-provider clusters and strive to exercise them in real-world scenarios 22 | 23 | We merge features that are along the "blessed path". We minimize options to reduce complexity and matrix size. We remove outdated materials to reduce sprawl. "Skate where the puck is going", but also "wait until the fit is right". No is temporary, yes is forever. 24 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = aws_instance.controllers.*.public_ip[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "$HOME/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | aws_route53_record.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = aws_instance.controllers[0].public_ip 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers.
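# (Illustration only; the asset key is hypothetical: each assets_bundle entry
# rendered above takes the form
#   ##### manifests/example.yaml
#   <file contents>
# so the /opt/bootstrap/layout step on the controller can split the single
# copied stream back into individual files.)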
11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = local.controllers_ipv4_public[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "$HOME/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | google_dns_record_set.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = local.controllers_ipv4_public[0] 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = aws_instance.controllers.*.public_ip[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "$HOME/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | aws_route53_record.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = aws_instance.controllers[0].public_ip 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 
11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | ] 17 | 18 | connection { 19 | type = "ssh" 20 | host = local.controllers_ipv4_public[count.index] 21 | user = "core" 22 | timeout = "15m" 23 | } 24 | 25 | provisioner "file" { 26 | content = join("\n", local.assets_bundle) 27 | destination = "$HOME/assets" 28 | } 29 | 30 | provisioner "remote-exec" { 31 | inline = [ 32 | "sudo /opt/bootstrap/layout", 33 | ] 34 | } 35 | } 36 | 37 | # Connect to a controller to perform one-time cluster bootstrap. 38 | resource "null_resource" "bootstrap" { 39 | depends_on = [ 40 | null_resource.copy-controller-secrets, 41 | module.workers, 42 | google_dns_record_set.apiserver, 43 | ] 44 | 45 | connection { 46 | type = "ssh" 47 | host = local.controllers_ipv4_public[0] 48 | user = "core" 49 | timeout = "15m" 50 | } 51 | 52 | provisioner "remote-exec" { 53 | inline = [ 54 | "sudo systemctl start bootstrap", 55 | ] 56 | } 57 | } 58 | 59 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "controllers_dns" { 8 | value = digitalocean_record.controllers[0].fqdn 9 | } 10 | 11 | output "workers_dns" { 12 | # Multiple A and AAAA records with the same FQDN 13 | value = digitalocean_record.workers-record-a[0].fqdn 14 | } 15 | 16 | output "controllers_ipv4" { 17 | value = digitalocean_droplet.controllers.*.ipv4_address 18 | } 19 | 20 | output "controllers_ipv6" { 21 | value = digitalocean_droplet.controllers.*.ipv6_address 22 | } 23 | 24 | output "workers_ipv4" { 25 | value = digitalocean_droplet.workers.*.ipv4_address 26 | } 27 | 28 | output "workers_ipv6" { 29 | value = digitalocean_droplet.workers.*.ipv6_address 30 | } 31 | 32 | # Outputs for worker pools 33 | 34 | output "kubeconfig" { 35 | value = module.bootstrap.kubeconfig-kubelet 36 | } 37 | 38 | # Outputs for custom firewalls 39 | 40 | output "controller_tag" { 41 | description = "Tag applied to controller droplets" 42 | value = digitalocean_tag.controllers.name 43 | } 44 | 45 | output "worker_tag" { 46 | description = "Tag applied to worker droplets" 47 | value = digitalocean_tag.workers.name 48 | } 49 | 50 | # Outputs for custom load balancing 51 | 52 | output "vpc_id" { 53 | description = "ID of the cluster VPC" 54 | value = digitalocean_vpc.network.id 55 | } 56 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "controllers_dns" { 8 | value = digitalocean_record.controllers[0].fqdn 9 | } 10 | 11 | output "workers_dns" { 12 | # Multiple A and AAAA records with the same FQDN 13 | value = digitalocean_record.workers-record-a[0].fqdn 14 | } 15 | 16 | output "controllers_ipv4" { 17 | value = digitalocean_droplet.controllers.*.ipv4_address 18 | } 19 | 20 | output "controllers_ipv6" { 21 | value = digitalocean_droplet.controllers.*.ipv6_address 22 | } 23 | 24 | output "workers_ipv4" { 25 | value = digitalocean_droplet.workers.*.ipv4_address 26 | } 27 | 28 | output 
"workers_ipv6" { 29 | value = digitalocean_droplet.workers.*.ipv6_address 30 | } 31 | 32 | # Outputs for worker pools 33 | 34 | output "kubeconfig" { 35 | value = module.bootstrap.kubeconfig-kubelet 36 | } 37 | 38 | # Outputs for custom firewalls 39 | 40 | output "controller_tag" { 41 | description = "Tag applied to controller droplets" 42 | value = digitalocean_tag.controllers.name 43 | } 44 | 45 | output "worker_tag" { 46 | description = "Tag applied to worker droplets" 47 | value = digitalocean_tag.workers.name 48 | } 49 | 50 | # Outputs for custom load balancing 51 | 52 | output "vpc_id" { 53 | description = "ID of the cluster VPC" 54 | value = digitalocean_vpc.network.id 55 | } 56 | 57 | -------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/groups.tf: -------------------------------------------------------------------------------- 1 | resource "matchbox_group" "install" { 2 | count = length(var.controllers) + length(var.workers) 3 | 4 | name = format("install-%s", concat(var.controllers.*.name, var.workers.*.name)[count.index]) 5 | 6 | # pick one of 4 Matchbox profiles (Container Linux or Flatcar, cached or non-cached) 7 | profile = local.flavor == "flatcar" ? var.cached_install ? matchbox_profile.cached-flatcar-linux-install.*.name[count.index] : matchbox_profile.flatcar-install.*.name[count.index] : var.cached_install ? matchbox_profile.cached-container-linux-install.*.name[count.index] : matchbox_profile.container-linux-install.*.name[count.index] 8 | 9 | selector = { 10 | mac = concat(var.controllers.*.mac, var.workers.*.mac)[count.index] 11 | } 12 | } 13 | 14 | resource "matchbox_group" "controller" { 15 | count = length(var.controllers) 16 | name = format("%s-%s", var.cluster_name, var.controllers[count.index].name) 17 | profile = matchbox_profile.controllers.*.name[count.index] 18 | 19 | selector = { 20 | mac = var.controllers[count.index].mac 21 | os = "installed" 22 | } 23 | } 24 | 25 | resource "matchbox_group" "worker" { 26 | count = length(var.workers) 27 | name = format("%s-%s", var.cluster_name, var.workers[count.index].name) 28 | profile = matchbox_profile.workers.*.name[count.index] 29 | 30 | selector = { 31 | mac = var.workers[count.index].mac 32 | os = "installed" 33 | } 34 | } 35 | 36 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | azurerm_linux_virtual_machine.controllers 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = azurerm_public_ip.controllers.*.ip_address[count.index] 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = join("\n", local.assets_bundle) 28 | destination = "$HOME/assets" 29 | } 30 | 31 | provisioner "remote-exec" { 32 | inline = [ 33 | "sudo /opt/bootstrap/layout", 34 | ] 35 | } 36 | } 37 | 38 | # Connect to a controller to perform one-time cluster bootstrap. 
39 | resource "null_resource" "bootstrap" { 40 | depends_on = [ 41 | null_resource.copy-controller-secrets, 42 | module.workers, 43 | azurerm_dns_a_record.apiserver, 44 | ] 45 | 46 | connection { 47 | type = "ssh" 48 | host = azurerm_public_ip.controllers.*.ip_address[0] 49 | user = "core" 50 | timeout = "15m" 51 | } 52 | 53 | provisioner "remote-exec" { 54 | inline = [ 55 | "sudo systemctl start bootstrap", 56 | ] 57 | } 58 | } 59 | 60 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | azurerm_linux_virtual_machine.controllers 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = azurerm_public_ip.controllers.*.ip_address[count.index] 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = join("\n", local.assets_bundle) 28 | destination = "$HOME/assets" 29 | } 30 | 31 | provisioner "remote-exec" { 32 | inline = [ 33 | "sudo /opt/bootstrap/layout", 34 | ] 35 | } 36 | } 37 | 38 | # Connect to a controller to perform one-time cluster bootstrap. 39 | resource "null_resource" "bootstrap" { 40 | depends_on = [ 41 | null_resource.copy-controller-secrets, 42 | module.workers, 43 | azurerm_dns_a_record.apiserver, 44 | ] 45 | 46 | connection { 47 | type = "ssh" 48 | host = azurerm_public_ip.controllers.*.ip_address[0] 49 | user = "core" 50 | timeout = "15m" 51 | } 52 | 53 | provisioner "remote-exec" { 54 | inline = [ 55 | "sudo systemctl start bootstrap", 56 | ] 57 | } 58 | } 59 | 60 | -------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/cl/bare-metal/). 
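## Example

A minimal sketch of declaring a cluster with this module. The variable names below appear in this module's configs, but the set shown is illustrative rather than exhaustive, and every value is a placeholder:

```tf
module "bare-metal-cluster" {
  # illustrative local path; pin a released module ref in real use
  source = "./bare-metal/container-linux/kubernetes"

  cluster_name    = "example"
  k8s_domain_name = "node1.example.com"

  # machine records (names, MACs, and domains are placeholders)
  controllers = [{ name = "node1", mac = "52:54:00:a1:9c:ae", domain = "node1.example.com" }]
  workers     = [{ name = "node2", mac = "52:54:00:b2:2f:86" }]

  ssh_authorized_key = "ssh-rsa AAAA..."
  asset_dir          = "/home/user/.secrets/clusters/example"
}
```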
23 | 24 | -------------------------------------------------------------------------------- /bare-metal/container-linux/kubernetes/cl/install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | systemd: 3 | units: 4 | - name: installer.service 5 | enabled: true 6 | contents: | 7 | [Unit] 8 | Requires=network-online.target 9 | After=network-online.target 10 | [Service] 11 | Type=simple 12 | ExecStart=/opt/installer 13 | [Install] 14 | WantedBy=multi-user.target 15 | # Avoid using the standard SSH port so terraform apply cannot SSH until 16 | # post-install. But admins may SSH to debug disk install problems. 17 | # After install, sshd will use port 22 and users/terraform can connect. 18 | - name: sshd.socket 19 | dropins: 20 | - name: 10-sshd-port.conf 21 | contents: | 22 | [Socket] 23 | ListenStream= 24 | ListenStream=2222 25 | storage: 26 | files: 27 | - path: /opt/installer 28 | filesystem: root 29 | mode: 0500 30 | contents: 31 | inline: | 32 | #!/bin/bash -ex 33 | curl --retry 10 "${ignition_endpoint}?{{.request.raw_query}}&os=installed" -o ignition.json 34 | ${os_flavor}-install \ 35 | -d ${install_disk} \ 36 | -C ${os_channel} \ 37 | -V ${os_version} \ 38 | ${baseurl_flag} \ 39 | -i ignition.json 40 | udevadm settle 41 | systemctl reboot 42 | passwd: 43 | users: 44 | - name: core 45 | ssh_authorized_keys: 46 | - "${ssh_authorized_key}" 47 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/cl/digital-ocean/). 23 | 24 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 
4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Digital Ocean [tutorial](https://typhoon.psdn.io/fedora-coreos/digitalocean/). 23 | 24 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the bare-metal [tutorial](https://typhoon.psdn.io/fedora-coreos/bare-metal/). 23 | 24 | -------------------------------------------------------------------------------- /DCO: -------------------------------------------------------------------------------- 1 | Developer Certificate of Origin 2 | Version 1.1 3 | 4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 5 | 1 Letterman Drive 6 | Suite D4700 7 | San Francisco, CA, 94129 8 | 9 | Everyone is permitted to copy and distribute verbatim copies of this 10 | license document, but changing it is not allowed.
11 | 12 | 13 | Developer's Certificate of Origin 1.1 14 | 15 | By making a contribution to this project, I certify that: 16 | 17 | (a) The contribution was created in whole or in part by me and I 18 | have the right to submit it under the open source license 19 | indicated in the file; or 20 | 21 | (b) The contribution is based upon previous work that, to the best 22 | of my knowledge, is covered under an appropriate open source 23 | license and I have the right under that license to submit that 24 | work with modifications, whether created in whole or in part 25 | by me, under the same open source license (unless I am 26 | permitted to submit under a different license), as indicated 27 | in the file; or 28 | 29 | (c) The contribution was provided directly to me by some other 30 | person who certified (a), (b) or (c) and I have not modified 31 | it. 32 | 33 | (d) I understand and agree that this project and the contribution 34 | are public and that a record of the contribution (including all 35 | personal information I submit with it, including my sign-off) is 36 | maintained indefinitely and may be redistributed consistent with 37 | this project or the open source license(s) involved. 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report a bug to improve the project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Description** 13 | 14 | A clear and concise description of what the bug is. 15 | 16 | **Steps to Reproduce** 17 | 18 | Provide clear steps to reproduce the bug. 19 | 20 | - [ ] Relevant error messages if appropriate (concise, not a dump of everything). 21 | - [ ] Explored using a vanilla cluster from the [tutorials](https://typhoon.psdn.io/#documentation). Ruled out [customizations](https://typhoon.psdn.io/advanced/customization/). 22 | 23 | **Expected behavior** 24 | 25 | A clear and concise description of what you expected to happen. 26 | 27 | **Environment** 28 | 29 | * Platform: aws, azure, bare-metal, google-cloud, digital-ocean 30 | * OS: fedora-coreos, flatcar-linux (include release version) 31 | * Release: Typhoon version or Git SHA (reporting latest is **not** helpful) 32 | * Terraform: `terraform version` (reporting latest is **not** helpful) 33 | * Plugins: Provider plugin versions (reporting latest is **not** helpful) 34 | 35 | **Possible Solution** 36 | 37 | 38 | 39 | Link to a PR or description. 40 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 
11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/cl/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/cl/aws/). 23 | 24 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/cl/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Azure [tutorial](https://typhoon.psdn.io/cl/azure/). 23 | 24 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 
11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/fedora-coreos/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the AWS [tutorial](https://typhoon.psdn.io/fedora-coreos/aws/). 23 | 24 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "ingress_dns_name" { 8 | value = aws_lb.nlb.dns_name 9 | description = "DNS name of the network load balancer for distributing traffic to Ingress controllers" 10 | } 11 | 12 | output "ingress_zone_id" { 13 | value = aws_lb.nlb.zone_id 14 | description = "Route53 zone id of the network load balancer DNS name that can be used in Route53 alias records" 15 | } 16 | 17 | # Outputs for worker pools 18 | 19 | output "vpc_id" { 20 | value = aws_vpc.network.id 21 | description = "ID of the VPC for creating worker instances" 22 | } 23 | 24 | output "subnet_ids" { 25 | value = aws_subnet.public.*.id 26 | description = "List of subnet IDs for creating worker instances" 27 | } 28 | 29 | output "worker_security_groups" { 30 | value = [aws_security_group.worker.id] 31 | description = "List of worker security group IDs" 32 | } 33 | 34 | output "kubeconfig" { 35 | value = module.bootstrap.kubeconfig-kubelet 36 | } 37 | 38 | # Outputs for custom load balancing 39 | 40 | output "nlb_id" { 41 | description = "ARN of the Network Load Balancer" 42 | value = aws_lb.nlb.id 43 | } 44 | 45 | output "worker_target_group_http" { 46 | description = "ARN of a target group of workers for HTTP traffic" 47 | value = module.workers.target_group_http 48 | } 49 | 50 | output "worker_target_group_https" { 51 | description = "ARN of a target group of workers for HTTPS traffic" 52 | value = module.workers.target_group_https 53 | } 54 | 55 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "ingress_static_ipv4" { 8 | value = azurerm_public_ip.ingress-ipv4.ip_address 9 | description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers" 10 | } 11 | 12 | # Outputs for worker pools 13 | 14 | output "region" { 15 | value = azurerm_resource_group.cluster.location 16 | } 17 | 18 | output "resource_group_name" { 19 | value = azurerm_resource_group.cluster.name 20 | } 21 | 22 | output "resource_group_id" { 23 | value = azurerm_resource_group.cluster.id 24 | }
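# Note: the "worker pools" outputs in this file (region, resource_group_name,
# resource_group_id, subnet_id, security_group_id, kubeconfig) exist so extra
# worker pools can attach to this cluster's network and join the cluster. An
# attached pool module would consume them as inputs, e.g. (illustrative only):
#
#   region              = module.ramius.region
#   resource_group_name = module.ramius.resource_group_name
#   subnet_id           = module.ramius.subnet_id
#   security_group_id   = module.ramius.security_group_id
#   kubeconfig          = module.ramius.kubeconfig
#
# where "ramius" is a hypothetical name for this cluster module instance.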
25 | 26 | output "subnet_id" { 27 | value = azurerm_subnet.worker.id 28 | } 29 | 30 | output "security_group_id" { 31 | value = azurerm_network_security_group.worker.id 32 | } 33 | 34 | output "kubeconfig" { 35 | value = module.bootstrap.kubeconfig-kubelet 36 | } 37 | 38 | # Outputs for custom firewalling 39 | 40 | output "worker_security_group_name" { 41 | value = azurerm_network_security_group.worker.name 42 | } 43 | 44 | output "worker_address_prefix" { 45 | description = "Worker network subnet CIDR address (for source/destination)" 46 | value = azurerm_subnet.worker.address_prefix 47 | } 48 | 49 | # Outputs for custom load balancing 50 | 51 | output "loadbalancer_id" { 52 | description = "ID of the cluster load balancer" 53 | value = azurerm_lb.cluster.id 54 | } 55 | 56 | output "backend_address_pool_id" { 57 | description = "ID of the worker backend address pool" 58 | value = azurerm_lb_backend_address_pool.worker.id 59 | } 60 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "ingress_dns_name" { 8 | value = aws_lb.nlb.dns_name 9 | description = "DNS name of the network load balancer for distributing traffic to Ingress controllers" 10 | } 11 | 12 | output "ingress_zone_id" { 13 | value = aws_lb.nlb.zone_id 14 | description = "Route53 zone id of the network load balancer DNS name that can be used in Route53 alias records" 15 | } 16 | 17 | # Outputs for worker pools 18 | 19 | output "vpc_id" { 20 | value = aws_vpc.network.id 21 | description = "ID of the VPC for creating worker instances" 22 | } 23 | 24 | output "subnet_ids" { 25 | value = aws_subnet.public.*.id 26 | description = "List of subnet IDs for creating worker instances" 27 | } 28 | 29 | output "worker_security_groups" { 30 | value = [aws_security_group.worker.id] 31 | description = "List of worker security group IDs" 32 | } 33 | 34 | output "kubeconfig" { 35 | value = module.bootstrap.kubeconfig-kubelet 36 | } 37 | 38 | # Outputs for custom load balancing 39 | 40 | output "nlb_id" { 41 | description = "ARN of the Network Load Balancer" 42 | value = aws_lb.nlb.id 43 | } 44 | 45 | output "worker_target_group_http" { 46 | description = "ARN of a target group of workers for HTTP traffic" 47 | value = module.workers.target_group_http 48 | } 49 | 50 | output "worker_target_group_https" { 51 | description = "ARN of a target group of workers for HTTPS traffic" 52 | value = module.workers.target_group_https 53 | } 54 | 55 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig-admin" { 2 | value = module.bootstrap.kubeconfig-admin 3 | } 4 | 5 | # Outputs for Kubernetes Ingress 6 | 7 | output "ingress_static_ipv4" { 8 | value = azurerm_public_ip.ingress-ipv4.ip_address 9 | description = "IPv4 address of the load balancer for distributing traffic to Ingress controllers" 10 | } 11 | 12 | # Outputs for worker pools 13 | 14 | output "region" { 15 | value = azurerm_resource_group.cluster.location 16 | } 17 | 18 | output "resource_group_name" { 19 | value = azurerm_resource_group.cluster.name 20 | } 21 | 22 | output "resource_group_id" { 23 | value = 
azurerm_resource_group.cluster.id 24 | } 25 | 26 | output "subnet_id" { 27 | value = azurerm_subnet.worker.id 28 | } 29 | 30 | output "security_group_id" { 31 | value = azurerm_network_security_group.worker.id 32 | } 33 | 34 | output "kubeconfig" { 35 | value = module.bootstrap.kubeconfig-kubelet 36 | } 37 | 38 | # Outputs for custom firewalling 39 | 40 | output "worker_security_group_name" { 41 | value = azurerm_network_security_group.worker.name 42 | } 43 | 44 | output "worker_address_prefix" { 45 | description = "Worker network subnet CIDR address (for source/destination)" 46 | value = azurerm_subnet.worker.address_prefix 47 | } 48 | 49 | # Outputs for custom load balancing 50 | 51 | output "loadbalancer_id" { 52 | description = "ID of the cluster load balancer" 53 | value = azurerm_lb.cluster.id 54 | } 55 | 56 | output "backend_address_pool_id" { 57 | description = "ID of the worker backend address pool" 58 | value = azurerm_lb_backend_address_pool.worker.id 59 | } 60 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot priority](https://typhoon.psdn.io/fedora-coreos/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/) customization 18 | * Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Azure [tutorial](https://typhoon.psdn.io/fedora-coreos/azure/). 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 
11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/cl/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#container-linux) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/cl/google-cloud/). 23 | 24 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Typhoon 2 | 3 | Typhoon is a minimal and free Kubernetes distribution. 4 | 5 | * Minimal, stable base Kubernetes distribution 6 | * Declarative infrastructure and configuration 7 | * Free (freedom and cost) and privacy-respecting 8 | * Practical for labs, datacenters, and clouds 9 | 10 | Typhoon distributes upstream Kubernetes, architectural conventions, and cluster addons, much like a GNU/Linux distribution provides the Linux kernel and userspace components. 11 | 12 | ## Features 13 | 14 | * Kubernetes v1.18.5 (upstream) 15 | * Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking 16 | * On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing 17 | * Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/fedora-coreos/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/) customization 18 | * Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/) 19 | 20 | ## Docs 21 | 22 | Please see the [official docs](https://typhoon.psdn.io) and the Google Cloud [tutorial](https://typhoon.psdn.io/fedora-coreos/google-cloud/). 
23 | 24 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/network.tf: -------------------------------------------------------------------------------- 1 | # Organize cluster into a resource group 2 | resource "azurerm_resource_group" "cluster" { 3 | name = var.cluster_name 4 | location = var.region 5 | } 6 | 7 | resource "azurerm_virtual_network" "network" { 8 | resource_group_name = azurerm_resource_group.cluster.name 9 | 10 | name = var.cluster_name 11 | location = azurerm_resource_group.cluster.location 12 | address_space = [var.host_cidr] 13 | } 14 | 15 | # Subnets - separate subnets for controller and workers because Azure 16 | # network security groups are based on IPv4 CIDR rather than instance 17 | # tags like GCP or security group membership like AWS 18 | 19 | resource "azurerm_subnet" "controller" { 20 | resource_group_name = azurerm_resource_group.cluster.name 21 | 22 | name = "controller" 23 | virtual_network_name = azurerm_virtual_network.network.name 24 | address_prefixes = [cidrsubnet(var.host_cidr, 1, 0)] 25 | } 26 | 27 | resource "azurerm_subnet_network_security_group_association" "controller" { 28 | subnet_id = azurerm_subnet.controller.id 29 | network_security_group_id = azurerm_network_security_group.controller.id 30 | } 31 | 32 | resource "azurerm_subnet" "worker" { 33 | resource_group_name = azurerm_resource_group.cluster.name 34 | 35 | name = "worker" 36 | virtual_network_name = azurerm_virtual_network.network.name 37 | address_prefixes = [cidrsubnet(var.host_cidr, 1, 1)] 38 | } 39 | 40 | resource "azurerm_subnet_network_security_group_association" "worker" { 41 | subnet_id = azurerm_subnet.worker.id 42 | network_security_group_id = azurerm_network_security_group.worker.id 43 | } 44 | 45 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/network.tf: -------------------------------------------------------------------------------- 1 | # Organize cluster into a resource group 2 | resource "azurerm_resource_group" "cluster" { 3 | name = var.cluster_name 4 | location = var.region 5 | } 6 | 7 | resource "azurerm_virtual_network" "network" { 8 | resource_group_name = azurerm_resource_group.cluster.name 9 | 10 | name = var.cluster_name 11 | location = azurerm_resource_group.cluster.location 12 | address_space = [var.host_cidr] 13 | } 14 | 15 | # Subnets - separate subnets for controller and workers because Azure 16 | # network security groups are based on IPv4 CIDR rather than instance 17 | # tags like GCP or security group membership like AWS 18 | 19 | resource "azurerm_subnet" "controller" { 20 | resource_group_name = azurerm_resource_group.cluster.name 21 | 22 | name = "controller" 23 | virtual_network_name = azurerm_virtual_network.network.name 24 | address_prefixes = [cidrsubnet(var.host_cidr, 1, 0)] 25 | } 26 | 27 | resource "azurerm_subnet_network_security_group_association" "controller" { 28 | subnet_id = azurerm_subnet.controller.id 29 | network_security_group_id = azurerm_network_security_group.controller.id 30 | } 31 | 32 | resource "azurerm_subnet" "worker" { 33 | resource_group_name = azurerm_resource_group.cluster.name 34 | 35 | name = "worker" 36 | virtual_network_name = azurerm_virtual_network.network.name 37 | address_prefixes = [cidrsubnet(var.host_cidr, 1, 1)] 38 | } 39 | 40 | resource "azurerm_subnet_network_security_group_association" "worker" { 41 | subnet_id = azurerm_subnet.worker.id 42 | network_security_group_id = 
azurerm_network_security_group.worker.id 43 | } 44 | 45 | -------------------------------------------------------------------------------- /addons/prometheus/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: prometheus 11 | phase: prod 12 | template: 13 | metadata: 14 | labels: 15 | name: prometheus 16 | phase: prod 17 | annotations: 18 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 19 | spec: 20 | serviceAccountName: prometheus 21 | containers: 22 | - name: prometheus 23 | image: quay.io/prometheus/prometheus:v2.19.1 24 | args: 25 | - --web.listen-address=0.0.0.0:9090 26 | - --config.file=/etc/prometheus/prometheus.yaml 27 | - --storage.tsdb.path=/var/lib/prometheus 28 | ports: 29 | - name: web 30 | containerPort: 9090 31 | resources: 32 | requests: 33 | cpu: 100m 34 | memory: 200Mi 35 | volumeMounts: 36 | - name: config 37 | mountPath: /etc/prometheus 38 | - name: rules 39 | mountPath: /etc/prometheus/rules 40 | - name: data 41 | mountPath: /var/lib/prometheus 42 | livenessProbe: 43 | httpGet: 44 | path: /-/healthy 45 | port: 9090 46 | initialDelaySeconds: 10 47 | timeoutSeconds: 10 48 | readinessProbe: 49 | httpGet: 50 | path: /-/ready 51 | port: 9090 52 | initialDelaySeconds: 10 53 | timeoutSeconds: 10 54 | terminationGracePeriodSeconds: 30 55 | volumes: 56 | - name: config 57 | configMap: 58 | name: prometheus-config 59 | - name: rules 60 | configMap: 61 | name: prometheus-rules 62 | - name: data 63 | emptyDir: {} 64 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/network.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "all" { 2 | } 3 | 4 | # Network VPC, gateway, and routes 5 | 6 | resource "aws_vpc" "network" { 7 | cidr_block = var.host_cidr 8 | assign_generated_ipv6_cidr_block = true 9 | enable_dns_support = true 10 | enable_dns_hostnames = true 11 | 12 | tags = { 13 | "Name" = var.cluster_name 14 | } 15 | } 16 | 17 | resource "aws_internet_gateway" "gateway" { 18 | vpc_id = aws_vpc.network.id 19 | 20 | tags = { 21 | "Name" = var.cluster_name 22 | } 23 | } 24 | 25 | resource "aws_route_table" "default" { 26 | vpc_id = aws_vpc.network.id 27 | 28 | tags = { 29 | "Name" = var.cluster_name 30 | } 31 | } 32 | 33 | resource "aws_route" "egress-ipv4" { 34 | route_table_id = aws_route_table.default.id 35 | destination_cidr_block = "0.0.0.0/0" 36 | gateway_id = aws_internet_gateway.gateway.id 37 | } 38 | 39 | resource "aws_route" "egress-ipv6" { 40 | route_table_id = aws_route_table.default.id 41 | destination_ipv6_cidr_block = "::/0" 42 | gateway_id = aws_internet_gateway.gateway.id 43 | } 44 | 45 | # Subnets (one per availability zone) 46 | 47 | resource "aws_subnet" "public" { 48 | count = length(data.aws_availability_zones.all.names) 49 | 50 | vpc_id = aws_vpc.network.id 51 | availability_zone = data.aws_availability_zones.all.names[count.index] 52 | 53 | cidr_block = cidrsubnet(var.host_cidr, 4, count.index) 54 | ipv6_cidr_block = cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index) 55 | map_public_ip_on_launch = true 56 | assign_ipv6_address_on_creation = true 57 | 58 | tags = { 59 | "Name" = "${var.cluster_name}-public-${count.index}" 60 | } 61 | } 62 | 63 | resource "aws_route_table_association" "public" { 64 
| count = length(data.aws_availability_zones.all.names) 65 | 66 | route_table_id = aws_route_table.default.id 67 | subnet_id = aws_subnet.public.*.id[count.index] 68 | } 69 | 70 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/network.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "all" { 2 | } 3 | 4 | # Network VPC, gateway, and routes 5 | 6 | resource "aws_vpc" "network" { 7 | cidr_block = var.host_cidr 8 | assign_generated_ipv6_cidr_block = true 9 | enable_dns_support = true 10 | enable_dns_hostnames = true 11 | 12 | tags = { 13 | "Name" = var.cluster_name 14 | } 15 | } 16 | 17 | resource "aws_internet_gateway" "gateway" { 18 | vpc_id = aws_vpc.network.id 19 | 20 | tags = { 21 | "Name" = var.cluster_name 22 | } 23 | } 24 | 25 | resource "aws_route_table" "default" { 26 | vpc_id = aws_vpc.network.id 27 | 28 | tags = { 29 | "Name" = var.cluster_name 30 | } 31 | } 32 | 33 | resource "aws_route" "egress-ipv4" { 34 | route_table_id = aws_route_table.default.id 35 | destination_cidr_block = "0.0.0.0/0" 36 | gateway_id = aws_internet_gateway.gateway.id 37 | } 38 | 39 | resource "aws_route" "egress-ipv6" { 40 | route_table_id = aws_route_table.default.id 41 | destination_ipv6_cidr_block = "::/0" 42 | gateway_id = aws_internet_gateway.gateway.id 43 | } 44 | 45 | # Subnets (one per availability zone) 46 | 47 | resource "aws_subnet" "public" { 48 | count = length(data.aws_availability_zones.all.names) 49 | 50 | vpc_id = aws_vpc.network.id 51 | availability_zone = data.aws_availability_zones.all.names[count.index] 52 | 53 | cidr_block = cidrsubnet(var.host_cidr, 4, count.index) 54 | ipv6_cidr_block = cidrsubnet(aws_vpc.network.ipv6_cidr_block, 8, count.index) 55 | map_public_ip_on_launch = true 56 | assign_ipv6_address_on_creation = true 57 | 58 | tags = { 59 | "Name" = "${var.cluster_name}-public-${count.index}" 60 | } 61 | } 62 | 63 | resource "aws_route_table_association" "public" { 64 | count = length(data.aws_availability_zones.all.names) 65 | 66 | route_table_id = aws_route_table.default.id 67 | subnet_id = aws_subnet.public.*.id[count.index] 68 | } 69 | 70 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/kube-state-metrics/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kube-state-metrics 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | - nodes 12 | - pods 13 | - services 14 | - resourcequotas 15 | - replicationcontrollers 16 | - limitranges 17 | - persistentvolumeclaims 18 | - persistentvolumes 19 | - namespaces 20 | - endpoints 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - extensions 26 | resources: 27 | - daemonsets 28 | - deployments 29 | - replicasets 30 | - ingresses 31 | verbs: 32 | - list 33 | - watch 34 | - apiGroups: 35 | - apps 36 | resources: 37 | - statefulsets 38 | - daemonsets 39 | - deployments 40 | - replicasets 41 | verbs: 42 | - list 43 | - watch 44 | - apiGroups: 45 | - batch 46 | resources: 47 | - cronjobs 48 | - jobs 49 | verbs: 50 | - list 51 | - watch 52 | - apiGroups: 53 | - autoscaling 54 | resources: 55 | - horizontalpodautoscalers 56 | verbs: 57 | - list 58 | - watch 59 | - apiGroups: 60 | - policy 61 | resources: 62 | - poddisruptionbudgets 63 | verbs: 64 | - list 65 | - 
watch 66 | - apiGroups: 67 | - certificates.k8s.io 68 | resources: 69 | - certificatesigningrequests 70 | verbs: 71 | - list 72 | - watch 73 | - apiGroups: 74 | - storage.k8s.io 75 | resources: 76 | - storageclasses 77 | - volumeattachments 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - autoscaling.k8s.io 83 | resources: 84 | - verticalpodautoscalers 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - admissionregistration.k8s.io 90 | resources: 91 | - mutatingwebhookconfigurations 92 | - validatingwebhookconfigurations 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - networking.k8s.io 98 | resources: 99 | - networkpolicies 100 | verbs: 101 | - list 102 | - watch 103 | 104 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | # Worker DNS records 2 | resource "digitalocean_record" "workers-record-a" { 3 | count = var.worker_count 4 | 5 | # DNS zone where record should be created 6 | domain = var.dns_zone 7 | 8 | name = "${var.cluster_name}-workers" 9 | type = "A" 10 | ttl = 300 11 | value = digitalocean_droplet.workers.*.ipv4_address[count.index] 12 | } 13 | 14 | /* 15 | # TODO: Only official DigitalOcean images support IPv6 16 | resource "digitalocean_record" "workers-record-aaaa" { 17 | count = var.worker_count 18 | 19 | # DNS zone where record should be created 20 | domain = var.dns_zone 21 | 22 | name = "${var.cluster_name}-workers" 23 | type = "AAAA" 24 | ttl = 300 25 | value = digitalocean_droplet.workers.*.ipv6_address[count.index] 26 | } 27 | */ 28 | 29 | # Worker droplet instances 30 | resource "digitalocean_droplet" "workers" { 31 | count = var.worker_count 32 | 33 | name = "${var.cluster_name}-worker-${count.index}" 34 | region = var.region 35 | 36 | image = var.os_image 37 | size = var.worker_type 38 | 39 | # network 40 | private_networking = true 41 | vpc_uuid = digitalocean_vpc.network.id 42 | # TODO: Only official DigitalOcean images support IPv6 43 | ipv6 = false 44 | 45 | user_data = data.ct_config.worker-ignition.rendered 46 | ssh_keys = var.ssh_fingerprints 47 | 48 | tags = [ 49 | digitalocean_tag.workers.id, 50 | ] 51 | 52 | lifecycle { 53 | create_before_destroy = true 54 | } 55 | } 56 | 57 | # Tag to label workers 58 | resource "digitalocean_tag" "workers" { 59 | name = "${var.cluster_name}-worker" 60 | } 61 | 62 | # Worker Ignition config 63 | data "ct_config" "worker-ignition" { 64 | content = data.template_file.worker-config.rendered 65 | strict = true 66 | snippets = var.worker_snippets 67 | } 68 | 69 | # Worker Fedora CoreOS config 70 | data "template_file" "worker-config" { 71 | template = file("${path.module}/fcc/worker.yaml") 72 | 73 | vars = { 74 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 75 | cluster_domain_suffix = var.cluster_domain_suffix 76 | } 77 | } 78 | 79 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/workers.tf: -------------------------------------------------------------------------------- 1 | # Worker DNS records 2 | resource "digitalocean_record" "workers-record-a" { 3 | count = var.worker_count 4 | 5 | # DNS zone where record should be created 6 | domain = var.dns_zone 7 | 8 | name = "${var.cluster_name}-workers" 9 | type = "A" 10 | ttl = 300 11 | value = digitalocean_droplet.workers.*.ipv4_address[count.index] 12 | } 13 | 14 | resource 
"digitalocean_record" "workers-record-aaaa" { 15 | # only official DigitalOcean images support IPv6 16 | count = local.is_official_image ? var.worker_count : 0 17 | 18 | # DNS zone where record should be created 19 | domain = var.dns_zone 20 | 21 | name = "${var.cluster_name}-workers" 22 | type = "AAAA" 23 | ttl = 300 24 | value = digitalocean_droplet.workers.*.ipv6_address[count.index] 25 | } 26 | 27 | # Worker droplet instances 28 | resource "digitalocean_droplet" "workers" { 29 | count = var.worker_count 30 | 31 | name = "${var.cluster_name}-worker-${count.index}" 32 | region = var.region 33 | 34 | image = var.os_image 35 | size = var.worker_type 36 | 37 | # network 38 | private_networking = true 39 | vpc_uuid = digitalocean_vpc.network.id 40 | # only official DigitalOcean images support IPv6 41 | ipv6 = local.is_official_image 42 | 43 | user_data = data.ct_config.worker-ignition.rendered 44 | ssh_keys = var.ssh_fingerprints 45 | 46 | tags = [ 47 | digitalocean_tag.workers.id, 48 | ] 49 | 50 | lifecycle { 51 | create_before_destroy = true 52 | } 53 | } 54 | 55 | # Tag to label workers 56 | resource "digitalocean_tag" "workers" { 57 | name = "${var.cluster_name}-worker" 58 | } 59 | 60 | # Worker Ignition config 61 | data "ct_config" "worker-ignition" { 62 | content = data.template_file.worker-config.rendered 63 | strict = true 64 | snippets = var.worker_snippets 65 | } 66 | 67 | # Worker Container Linux config 68 | data "template_file" "worker-config" { 69 | template = file("${path.module}/cl/worker.yaml") 70 | 71 | vars = { 72 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 73 | cluster_domain_suffix = var.cluster_domain_suffix 74 | } 75 | } 76 | 77 | -------------------------------------------------------------------------------- /digital-ocean/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | digitalocean_firewall.rules 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = digitalocean_droplet.controllers.*.ipv4_address[count.index] 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = module.bootstrap.kubeconfig-kubelet 28 | destination = "$HOME/kubeconfig" 29 | } 30 | 31 | provisioner "file" { 32 | content = join("\n", local.assets_bundle) 33 | destination = "$HOME/assets" 34 | } 35 | 36 | provisioner "remote-exec" { 37 | inline = [ 38 | "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", 39 | "sudo /opt/bootstrap/layout", 40 | ] 41 | } 42 | } 43 | 44 | # Secure copy kubeconfig to all workers. Activates kubelet.service. 
45 | resource "null_resource" "copy-worker-secrets" { 46 | count = var.worker_count 47 | 48 | connection { 49 | type = "ssh" 50 | host = digitalocean_droplet.workers.*.ipv4_address[count.index] 51 | user = "core" 52 | timeout = "15m" 53 | } 54 | 55 | provisioner "file" { 56 | content = module.bootstrap.kubeconfig-kubelet 57 | destination = "$HOME/kubeconfig" 58 | } 59 | 60 | provisioner "remote-exec" { 61 | inline = [ 62 | "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", 63 | ] 64 | } 65 | } 66 | 67 | # Connect to a controller to perform one-time cluster bootstrap. 68 | resource "null_resource" "bootstrap" { 69 | depends_on = [ 70 | null_resource.copy-controller-secrets, 71 | null_resource.copy-worker-secrets, 72 | ] 73 | 74 | connection { 75 | type = "ssh" 76 | host = digitalocean_droplet.controllers[0].ipv4_address 77 | user = "core" 78 | timeout = "15m" 79 | } 80 | 81 | provisioner "remote-exec" { 82 | inline = [ 83 | "sudo systemctl start bootstrap", 84 | ] 85 | } 86 | } 87 | 88 | -------------------------------------------------------------------------------- /addons/prometheus/exporters/node-exporter/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | spec: 7 | updateStrategy: 8 | type: RollingUpdate 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: node-exporter 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: node-exporter 19 | phase: prod 20 | annotations: 21 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 22 | spec: 23 | serviceAccountName: node-exporter 24 | securityContext: 25 | runAsNonRoot: true 26 | runAsUser: 65534 27 | hostNetwork: true 28 | hostPID: true 29 | containers: 30 | - name: node-exporter 31 | image: quay.io/prometheus/node-exporter:v1.0.1 32 | args: 33 | - --path.procfs=/host/proc 34 | - --path.sysfs=/host/sys 35 | - --path.rootfs=/host/root 36 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) 37 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ 38 | ports: 39 | - name: metrics 40 | containerPort: 9100 41 | hostPort: 9100 42 | resources: 43 | requests: 44 | cpu: 100m 45 | memory: 50Mi 46 | limits: 47 | cpu: 200m 48 | memory: 100Mi 49 | volumeMounts: 50 | - name: proc 51 | mountPath: /host/proc 52 | readOnly: true 53 | - name: sys 54 | mountPath: /host/sys 55 | readOnly: true 56 | - name: root 57 | mountPath: /host/root 58 | readOnly: true 59 | tolerations: 60 | - key: node-role.kubernetes.io/master 61 | operator: Exists 62 | - key: node.kubernetes.io/not-ready 63 | operator: Exists 64 | volumes: 65 | - name: proc 66 | hostPath: 67 | path: /proc 68 | - name: sys 69 | hostPath: 70 | path: /sys 71 | - name: root 72 | hostPath: 73 | path: / 74 | -------------------------------------------------------------------------------- /digital-ocean/container-linux/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 
Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = var.controller_count 13 | 14 | depends_on = [ 15 | module.bootstrap, 16 | digitalocean_firewall.rules 17 | ] 18 | 19 | connection { 20 | type = "ssh" 21 | host = digitalocean_droplet.controllers.*.ipv4_address[count.index] 22 | user = "core" 23 | timeout = "15m" 24 | } 25 | 26 | provisioner "file" { 27 | content = module.bootstrap.kubeconfig-kubelet 28 | destination = "$HOME/kubeconfig" 29 | } 30 | 31 | provisioner "file" { 32 | content = join("\n", local.assets_bundle) 33 | destination = "$HOME/assets" 34 | } 35 | 36 | provisioner "remote-exec" { 37 | inline = [ 38 | "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", 39 | "sudo /opt/bootstrap/layout", 40 | ] 41 | } 42 | } 43 | 44 | # Secure copy kubeconfig to all workers. Activates kubelet.service. 45 | resource "null_resource" "copy-worker-secrets" { 46 | count = var.worker_count 47 | 48 | connection { 49 | type = "ssh" 50 | host = digitalocean_droplet.workers.*.ipv4_address[count.index] 51 | user = "core" 52 | timeout = "15m" 53 | } 54 | 55 | provisioner "file" { 56 | content = module.bootstrap.kubeconfig-kubelet 57 | destination = "$HOME/kubeconfig" 58 | } 59 | 60 | provisioner "remote-exec" { 61 | inline = [ 62 | "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", 63 | ] 64 | } 65 | } 66 | 67 | # Connect to a controller to perform one-time cluster bootstrap. 68 | resource "null_resource" "bootstrap" { 69 | depends_on = [ 70 | null_resource.copy-controller-secrets, 71 | null_resource.copy-worker-secrets, 72 | ] 73 | 74 | connection { 75 | type = "ssh" 76 | host = digitalocean_droplet.controllers[0].ipv4_address 77 | user = "core" 78 | timeout = "15m" 79 | } 80 | 81 | provisioner "remote-exec" { 82 | inline = [ 83 | "sudo systemctl start bootstrap", 84 | ] 85 | } 86 | } 87 | 88 | -------------------------------------------------------------------------------- /docs/addons/prometheus.md: -------------------------------------------------------------------------------- 1 | # Prometheus 2 | 3 | Prometheus collects metrics (e.g. `node_memory_usage_bytes`) from *targets* by scraping their HTTP metrics endpoints. Targets are organized into *jobs*, defined in the Prometheus config. Targets may expose counter, gauge, histogram, or summary metrics. 4 | 5 | Here's a simple config from the Prometheus [tutorial](https://prometheus.io/docs/introduction/getting_started/). 6 | 7 | ``` 8 | global: 9 | scrape_interval: 15s 10 | scrape_configs: 11 | - job_name: 'prometheus' 12 | scrape_interval: 5s 13 | static_configs: 14 | - targets: ['localhost:9090'] 15 | ``` 16 | 17 | On Kubernetes clusters, Prometheus is run as a Deployment, configured with a ConfigMap, and accessed via a Service or Ingress. 18 | 19 | ``` 20 | kubectl apply -f addons/prometheus -R 21 | ``` 22 | 23 | The ConfigMap configures Prometheus to discover apiservers, kubelets, cAdvisor, services, endpoints, and exporters. By default, data is kept in an `emptyDir` so it is persisted until the pod is rescheduled. 24 | 25 | ### Exporters 26 | 27 | Exporters expose metrics for 3rd-party systems that don't natively expose Prometheus metrics. 
28 | 29 | * [node_exporter](https://github.com/prometheus/node_exporter) - DaemonSet that exposes a machine's hardware and OS metrics 30 | * [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) - Deployment that exposes Kubernetes object metrics 31 | * [blackbox_exporter](https://github.com/prometheus/blackbox_exporter) - Scrapes HTTP, HTTPS, DNS, TCP, or ICMP endpoints and exposes availability as metrics 32 | 33 | ### Queries and Alerts 34 | 35 | Prometheus provides a basic UI for querying metrics and viewing alerts. Use `kubectl` to authenticate to the apiserver and create a local port-forward to the Prometheus pod. 36 | 37 | ``` 38 | kubectl get pods -n monitoring 39 | kubectl port-forward prometheus-POD-ID 9090 -n monitoring 40 | ``` 41 | 42 | Visit [127.0.0.1:9090](http://127.0.0.1:9090) to query [expressions](http://127.0.0.1:9090/graph), view [targets](http://127.0.0.1:9090/targets), or check [alerts](http://127.0.0.1:9090/alerts). 43 | 44 | ![Prometheus Graph](../img/prometheus-graph.png) 45 |
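
For example, an illustrative expression (not one shipped in the addon's config) that uses the cAdvisor metrics Prometheus scrapes to chart per-namespace CPU usage:

```
sum by (namespace) (rate(container_cpu_usage_seconds_total[5m]))
```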
46 | ![Prometheus Targets](../img/prometheus-targets.png) 47 |
48 | ![Prometheus Alerts](../img/prometheus-alerts.png) 49 | 50 | Use [Grafana](/addons/grafana/) to view or build dashboards that use Prometheus as the datasource. 51 | -------------------------------------------------------------------------------- /addons/nginx-ingress/bare-metal/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ingress-controller-public 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: ingress-controller-public 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: ingress-controller-public 19 | phase: prod 20 | annotations: 21 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 22 | spec: 23 | containers: 24 | - name: nginx-ingress-controller 25 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 26 | args: 27 | - /nginx-ingress-controller 28 | - --ingress-class=public 29 | # use downward API 30 | env: 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: POD_NAMESPACE 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.namespace 39 | ports: 40 | - name: http 41 | containerPort: 80 42 | - name: https 43 | containerPort: 443 44 | - name: health 45 | containerPort: 10254 46 | livenessProbe: 47 | httpGet: 48 | path: /healthz 49 | port: 10254 50 | scheme: HTTP 51 | initialDelaySeconds: 10 52 | periodSeconds: 10 53 | successThreshold: 1 54 | failureThreshold: 3 55 | timeoutSeconds: 5 56 | readinessProbe: 57 | httpGet: 58 | path: /healthz 59 | port: 10254 60 | scheme: HTTP 61 | periodSeconds: 10 62 | successThreshold: 1 63 | failureThreshold: 3 64 | timeoutSeconds: 5 65 | lifecycle: 66 | preStop: 67 | exec: 68 | command: 69 | - /wait-shutdown 70 | securityContext: 71 | capabilities: 72 | add: 73 | - NET_BIND_SERVICE 74 | drop: 75 | - ALL 76 | runAsUser: 101 # www-data 77 | restartPolicy: Always 78 | terminationGracePeriodSeconds: 300 79 | 80 | -------------------------------------------------------------------------------- /docs/topics/performance.md: -------------------------------------------------------------------------------- 1 | # Performance 2 | 3 | ## Provision Time 4 | 5 | Provisioning times vary based on the operating system and platform. Sampling the time to create (apply) and destroy clusters with 1 controller and 2 workers shows (roughly) what to expect. 6 | 7 | | Platform | Apply | Destroy | 8 | |---------------|-------|---------| 9 | | AWS | 5 min | 3 min | 10 | | Azure | 10 min | 7 min | 11 | | Bare-Metal | 10-15 min | NA | 12 | | Digital Ocean | 3 min 30 sec | 20 sec | 13 | | Google Cloud | 8 min | 5 min | 14 | 15 | Notes: 16 | 17 | * SOA TTL and NXDOMAIN caching can have a large impact on provision time 18 | * Platforms with auto-scaling take more time to provision (AWS, Azure, Google) 19 | * Bare-metal POST times and network bandwidth will affect provision times 20 | 21 | ## Network Performance 22 | 23 | Network performance varies based on the platform and CNI plugin. `iperf` was used to measure the bandwidth between different hosts and different pods. Host-to-host shows typical bandwidth between host machines. Pod-to-pod shows the bandwidth between two `iperf` containers. 
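
As a sketch of the measurement method (flags, images, and addresses below are illustrative; this doc does not record the exact invocation): start an `iperf` server on one endpoint and point a client at it.

```
# host-to-host: server on one machine, client on another
iperf -s
iperf -c <server-host-ip>

# pod-to-pod: same idea between two pods (image name is a placeholder)
kubectl run iperf-server --image=<iperf-image> -- iperf -s
kubectl run iperf-client --image=<iperf-image> --restart=Never -- iperf -c <server-pod-ip>
```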
24 | 25 | | Platform / Plugin | Theory | Host to Host | Pod to Pod | 26 | |----------------------------|-------:|-------------:|-------------:| 27 | | AWS (flannel) | 5 Gb/s | 4.94 Gb/s | 4.89 Gb/s | 28 | | AWS (calico, MTU 1480) | 5 Gb/s | 4.94 Gb/s | 4.42 Gb/s | 29 | | AWS (calico, MTU 8981) | 5 Gb/s | 4.94 Gb/s | 4.90 Gb/s | 30 | | Azure (flannel) | Varies | 749 Mb/s | 650 Mb/s | 31 | | Azure (calico) | Varies | 749 Mb/s | 650 Mb/s | 32 | | Bare-Metal (flannel) | 1 Gb/s | 940 Mb/s | 903 Mb/s | 33 | | Bare-Metal (calico) | 1 Gb/s | 940 Mb/s | 931 Mb/s | 34 | | Digital Ocean (flannel) | Varies | 1.97 Gb/s | 1.20 Gb/s | 35 | | Digital Ocean (calico) | Varies | 1.97 Gb/s | 1.20 Gb/s | 36 | | Google Cloud (flannel) | 2 Gb/s | 1.94 Gb/s | 1.76 Gb/s | 37 | | Google Cloud (calico) | 2 Gb/s | 1.94 Gb/s | 1.81 Gb/s | 38 | 39 | Notes: 40 | 41 | * Calico and Flannel have comparable performance. Platform and configuration differences dominate. 42 | * Azure and DigitalOcean network performance can be quite variable or depend on machine type 43 | * Only [certain AWS EC2 instance types](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) allow jumbo frames. This is why the default MTU on AWS must be 1480. 44 | 45 | -------------------------------------------------------------------------------- /docs/architecture/operating-systems.md: -------------------------------------------------------------------------------- 1 | # Operating Systems 2 | 3 | Typhoon supports [Fedora CoreOS](https://getfedora.org/coreos/) and [Flatcar Linux](https://www.flatcar-linux.org/). These operating systems were chosen because they offer: 4 | 5 | * Minimalism and focus on clustered operation 6 | * Automated and atomic operating system upgrades 7 | * Declarative and immutable configuration 8 | * Optimization for containerized applications 9 | 10 | Together, they diversify Typhoon to support a range of container technologies. 
11 | 12 | * Fedora CoreOS: rpm-ostree, podman, moby 13 | * Flatcar Linux: Gentoo core, rkt-fly, docker 14 | 15 | ## Host Properties 16 | 17 | | Property | Flatcar Linux | Fedora CoreOS | 18 | |-------------------|---------------------------------|---------------| 19 | | Kernel | ~4.19.x | ~5.5.x | 20 | | systemd | 241 | 243 | 21 | | Ignition system | Ignition v2.x spec | Ignition v3.x spec | 22 | | Container Engine | docker 18.06.3-ce | docker 18.09.8 | 23 | | storage driver | overlay2 (extfs) | overlay2 (xfs) | 24 | | logging driver | json-file | journald | 25 | | cgroup driver | cgroupfs (except Flatcar edge) | systemd | 26 | | Networking | systemd-networkd | NetworkManager | 27 | | Username | core | core | 28 | 29 | ## Kubernetes Properties 30 | 31 | | Property | Flatcar Linux | Fedora CoreOS | 32 | |-------------------|-----------------|---------------| 33 | | single-master | all platforms | all platforms | 34 | | multi-master | all platforms | all platforms | 35 | | control plane | static pods | static pods | 36 | | kubelet image | kubelet [image](https://github.com/poseidon/kubelet) with upstream binary | kubelet [image](https://github.com/poseidon/kubelet) with upstream binary | 37 | | control plane images | upstream images | upstream images | 38 | | on-host etcd | rkt-fly | podman | 39 | | on-host kubelet | rkt-fly | podman | 40 | | CNI plugins | calico or flannel | calico or flannel | 41 | | coordinated drain & OS update | [CLUO](https://github.com/coreos/container-linux-update-operator) addon | (planned) | 42 | 43 | ## Directory Locations 44 | 45 | Typhoon conventional directories. 46 | 47 | | Kubelet setting | Host location | 48 | |-------------------|--------------------------------| 49 | | cni-conf-dir | /etc/kubernetes/cni/net.d | 50 | | pod-manifest-path | /etc/kubernetes/manifests | 51 | | volume-plugin-dir | /var/lib/kubelet/volumeplugins | 52 | 53 | -------------------------------------------------------------------------------- /addons/nginx-ingress/aws/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | annotations: 21 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 22 | spec: 23 | containers: 24 | - name: nginx-ingress-controller 25 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 26 | args: 27 | - /nginx-ingress-controller 28 | - --ingress-class=public 29 | # use downward API 30 | env: 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: POD_NAMESPACE 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.namespace 39 | ports: 40 | - name: http 41 | containerPort: 80 42 | hostPort: 80 43 | - name: https 44 | containerPort: 443 45 | hostPort: 443 46 | - name: health 47 | containerPort: 10254 48 | hostPort: 10254 49 | livenessProbe: 50 | failureThreshold: 3 51 | httpGet: 52 | path: /healthz 53 | port: 10254 54 | scheme: HTTP 55 | initialDelaySeconds: 10 56 | periodSeconds: 10 57 | successThreshold: 1 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | failureThreshold: 3 61 | httpGet: 62 | path: /healthz 63 | port: 10254 64 | scheme: HTTP 65 | periodSeconds: 10 66 
| successThreshold: 1 67 | timeoutSeconds: 5 68 | lifecycle: 69 | preStop: 70 | exec: 71 | command: 72 | - /wait-shutdown 73 | securityContext: 74 | capabilities: 75 | add: 76 | - NET_BIND_SERVICE 77 | drop: 78 | - ALL 79 | runAsUser: 101 # www-data 80 | restartPolicy: Always 81 | terminationGracePeriodSeconds: 300 82 | -------------------------------------------------------------------------------- /addons/nginx-ingress/azure/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | annotations: 21 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 22 | spec: 23 | containers: 24 | - name: nginx-ingress-controller 25 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 26 | args: 27 | - /nginx-ingress-controller 28 | - --ingress-class=public 29 | # use downward API 30 | env: 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: POD_NAMESPACE 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.namespace 39 | ports: 40 | - name: http 41 | containerPort: 80 42 | hostPort: 80 43 | - name: https 44 | containerPort: 443 45 | hostPort: 443 46 | - name: health 47 | containerPort: 10254 48 | hostPort: 10254 49 | livenessProbe: 50 | failureThreshold: 3 51 | httpGet: 52 | path: /healthz 53 | port: 10254 54 | scheme: HTTP 55 | initialDelaySeconds: 10 56 | periodSeconds: 10 57 | successThreshold: 1 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | failureThreshold: 3 61 | httpGet: 62 | path: /healthz 63 | port: 10254 64 | scheme: HTTP 65 | periodSeconds: 10 66 | successThreshold: 1 67 | timeoutSeconds: 5 68 | lifecycle: 69 | preStop: 70 | exec: 71 | command: 72 | - /wait-shutdown 73 | securityContext: 74 | capabilities: 75 | add: 76 | - NET_BIND_SERVICE 77 | drop: 78 | - ALL 79 | runAsUser: 101 # www-data 80 | restartPolicy: Always 81 | terminationGracePeriodSeconds: 300 82 | -------------------------------------------------------------------------------- /addons/nginx-ingress/google-cloud/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | replicas: 2 8 | strategy: 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | annotations: 21 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 22 | spec: 23 | containers: 24 | - name: nginx-ingress-controller 25 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 26 | args: 27 | - /nginx-ingress-controller 28 | - --ingress-class=public 29 | # use downward API 30 | env: 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: POD_NAMESPACE 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.namespace 39 | ports: 40 | - name: http 41 | containerPort: 80 42 | hostPort: 80 43 | - name: https 44 | containerPort: 443 45 | hostPort: 443 46 | - 
name: health 47 | containerPort: 10254 48 | hostPort: 10254 49 | livenessProbe: 50 | failureThreshold: 3 51 | httpGet: 52 | path: /healthz 53 | port: 10254 54 | scheme: HTTP 55 | initialDelaySeconds: 10 56 | periodSeconds: 10 57 | successThreshold: 1 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | failureThreshold: 3 61 | httpGet: 62 | path: /healthz 63 | port: 10254 64 | scheme: HTTP 65 | periodSeconds: 10 66 | successThreshold: 1 67 | timeoutSeconds: 5 68 | lifecycle: 69 | preStop: 70 | exec: 71 | command: 72 | - /wait-shutdown 73 | securityContext: 74 | capabilities: 75 | add: 76 | - NET_BIND_SERVICE 77 | drop: 78 | - ALL 79 | runAsUser: 101 # www-data 80 | restartPolicy: Always 81 | terminationGracePeriodSeconds: 300 82 | -------------------------------------------------------------------------------- /addons/nginx-ingress/digital-ocean/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress 6 | spec: 7 | updateStrategy: 8 | type: RollingUpdate 9 | rollingUpdate: 10 | maxUnavailable: 1 11 | selector: 12 | matchLabels: 13 | name: nginx-ingress-controller 14 | phase: prod 15 | template: 16 | metadata: 17 | labels: 18 | name: nginx-ingress-controller 19 | phase: prod 20 | annotations: 21 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default' 22 | spec: 23 | containers: 24 | - name: nginx-ingress-controller 25 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.33.0 26 | args: 27 | - /nginx-ingress-controller 28 | - --ingress-class=public 29 | # use downward API 30 | env: 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: POD_NAMESPACE 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.namespace 39 | ports: 40 | - name: http 41 | containerPort: 80 42 | hostPort: 80 43 | - name: https 44 | containerPort: 443 45 | hostPort: 443 46 | - name: health 47 | containerPort: 10254 48 | hostPort: 10254 49 | livenessProbe: 50 | failureThreshold: 3 51 | httpGet: 52 | path: /healthz 53 | port: 10254 54 | scheme: HTTP 55 | initialDelaySeconds: 10 56 | periodSeconds: 10 57 | successThreshold: 1 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | failureThreshold: 3 61 | httpGet: 62 | path: /healthz 63 | port: 10254 64 | scheme: HTTP 65 | periodSeconds: 10 66 | successThreshold: 1 67 | timeoutSeconds: 5 68 | lifecycle: 69 | preStop: 70 | exec: 71 | command: 72 | - /wait-shutdown 73 | securityContext: 74 | capabilities: 75 | add: 76 | - NET_BIND_SERVICE 77 | drop: 78 | - ALL 79 | runAsUser: 101 # www-data 80 | restartPolicy: Always 81 | terminationGracePeriodSeconds: 300 82 | -------------------------------------------------------------------------------- /google-cloud/container-linux/kubernetes/workers/workers.tf: -------------------------------------------------------------------------------- 1 | # Managed instance group of workers 2 | resource "google_compute_region_instance_group_manager" "workers" { 3 | name = "${var.name}-worker-group" 4 | description = "Compute instance group of ${var.name} workers" 5 | 6 | # instance name prefix for instances in the group 7 | base_instance_name = "${var.name}-worker" 8 | region = var.region 9 | version { 10 | name = "default" 11 | instance_template = google_compute_instance_template.worker.self_link 12 | } 13 | 14 | target_size = var.worker_count 15 | target_pools = [google_compute_target_pool.workers.self_link] 16 | 17 | named_port { 18 
| name = "http" 19 | port = "80" 20 | } 21 | 22 | named_port { 23 | name = "https" 24 | port = "443" 25 | } 26 | } 27 | 28 | # Worker instance template 29 | resource "google_compute_instance_template" "worker" { 30 | name_prefix = "${var.name}-worker-" 31 | description = "Worker Instance template" 32 | machine_type = var.machine_type 33 | 34 | metadata = { 35 | user-data = data.ct_config.worker-ignition.rendered 36 | } 37 | 38 | scheduling { 39 | automatic_restart = var.preemptible ? false : true 40 | preemptible = var.preemptible 41 | } 42 | 43 | disk { 44 | auto_delete = true 45 | boot = true 46 | source_image = var.os_image 47 | disk_size_gb = var.disk_size 48 | } 49 | 50 | network_interface { 51 | network = var.network 52 | 53 | # Ephemeral external IP 54 | access_config { 55 | } 56 | } 57 | 58 | can_ip_forward = true 59 | tags = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"] 60 | 61 | guest_accelerator { 62 | count = var.accelerator_count 63 | type = var.accelerator_type 64 | } 65 | 66 | lifecycle { 67 | # To update an Instance Template, Terraform should replace the existing resource 68 | create_before_destroy = true 69 | } 70 | } 71 | 72 | # Worker Ignition config 73 | data "ct_config" "worker-ignition" { 74 | content = data.template_file.worker-config.rendered 75 | strict = true 76 | snippets = var.snippets 77 | } 78 | 79 | # Worker Container Linux config 80 | data "template_file" "worker-config" { 81 | template = file("${path.module}/cl/worker.yaml") 82 | 83 | vars = { 84 | kubeconfig = indent(10, var.kubeconfig) 85 | ssh_authorized_key = var.ssh_authorized_key 86 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 87 | cluster_domain_suffix = var.cluster_domain_suffix 88 | node_labels = join(",", var.node_labels) 89 | } 90 | } 91 | 92 | -------------------------------------------------------------------------------- /google-cloud/fedora-coreos/kubernetes/workers/workers.tf: -------------------------------------------------------------------------------- 1 | # Managed instance group of workers 2 | resource "google_compute_region_instance_group_manager" "workers" { 3 | name = "${var.name}-worker-group" 4 | description = "Compute instance group of ${var.name} workers" 5 | 6 | # instance name prefix for instances in the group 7 | base_instance_name = "${var.name}-worker" 8 | region = var.region 9 | version { 10 | name = "default" 11 | instance_template = google_compute_instance_template.worker.self_link 12 | } 13 | 14 | target_size = var.worker_count 15 | target_pools = [google_compute_target_pool.workers.self_link] 16 | 17 | named_port { 18 | name = "http" 19 | port = "80" 20 | } 21 | 22 | named_port { 23 | name = "https" 24 | port = "443" 25 | } 26 | } 27 | 28 | # Worker instance template 29 | resource "google_compute_instance_template" "worker" { 30 | name_prefix = "${var.name}-worker-" 31 | description = "Worker Instance template" 32 | machine_type = var.machine_type 33 | 34 | metadata = { 35 | user-data = data.ct_config.worker-ignition.rendered 36 | } 37 | 38 | scheduling { 39 | automatic_restart = var.preemptible ? 
false : true 40 | preemptible = var.preemptible 41 | } 42 | 43 | disk { 44 | auto_delete = true 45 | boot = true 46 | source_image = data.google_compute_image.fedora-coreos.self_link 47 | disk_size_gb = var.disk_size 48 | } 49 | 50 | network_interface { 51 | network = var.network 52 | 53 | # Ephemeral external IP 54 | access_config { 55 | } 56 | } 57 | 58 | can_ip_forward = true 59 | tags = ["worker", "${var.cluster_name}-worker", "${var.name}-worker"] 60 | 61 | guest_accelerator { 62 | count = var.accelerator_count 63 | type = var.accelerator_type 64 | } 65 | 66 | lifecycle { 67 | ignore_changes = [ 68 | disk[0].source_image 69 | ] 70 | # To update an Instance Template, Terraform should replace the existing resource 71 | create_before_destroy = true 72 | } 73 | } 74 | 75 | # Worker Ignition config 76 | data "ct_config" "worker-ignition" { 77 | content = data.template_file.worker-config.rendered 78 | strict = true 79 | snippets = var.snippets 80 | } 81 | 82 | # Worker Fedora CoreOS config 83 | data "template_file" "worker-config" { 84 | template = file("${path.module}/fcc/worker.yaml") 85 | 86 | vars = { 87 | kubeconfig = indent(10, var.kubeconfig) 88 | ssh_authorized_key = var.ssh_authorized_key 89 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 90 | cluster_domain_suffix = var.cluster_domain_suffix 91 | node_labels = join(",", var.node_labels) 92 | } 93 | } 94 | 95 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/nlb.tf: -------------------------------------------------------------------------------- 1 | # Network Load Balancer DNS Record 2 | resource "aws_route53_record" "apiserver" { 3 | zone_id = var.dns_zone_id 4 | 5 | name = format("%s.%s.", var.cluster_name, var.dns_zone) 6 | type = "A" 7 | 8 | # AWS recommends their special "alias" records for NLBs 9 | alias { 10 | name = aws_lb.nlb.dns_name 11 | zone_id = aws_lb.nlb.zone_id 12 | evaluate_target_health = true 13 | } 14 | } 15 | 16 | # Network Load Balancer for apiservers and ingress 17 | resource "aws_lb" "nlb" { 18 | name = "${var.cluster_name}-nlb" 19 | load_balancer_type = "network" 20 | internal = false 21 | 22 | subnets = aws_subnet.public.*.id 23 | 24 | enable_cross_zone_load_balancing = true 25 | } 26 | 27 | # Forward TCP apiserver traffic to controllers 28 | resource "aws_lb_listener" "apiserver-https" { 29 | load_balancer_arn = aws_lb.nlb.arn 30 | protocol = "TCP" 31 | port = "6443" 32 | 33 | default_action { 34 | type = "forward" 35 | target_group_arn = aws_lb_target_group.controllers.arn 36 | } 37 | } 38 | 39 | # Forward HTTP ingress traffic to workers 40 | resource "aws_lb_listener" "ingress-http" { 41 | load_balancer_arn = aws_lb.nlb.arn 42 | protocol = "TCP" 43 | port = 80 44 | 45 | default_action { 46 | type = "forward" 47 | target_group_arn = module.workers.target_group_http 48 | } 49 | } 50 | 51 | # Forward HTTPS ingress traffic to workers 52 | resource "aws_lb_listener" "ingress-https" { 53 | load_balancer_arn = aws_lb.nlb.arn 54 | protocol = "TCP" 55 | port = 443 56 | 57 | default_action { 58 | type = "forward" 59 | target_group_arn = module.workers.target_group_https 60 | } 61 | } 62 | 63 | # Target group of controllers 64 | resource "aws_lb_target_group" "controllers" { 65 | name = "${var.cluster_name}-controllers" 66 | vpc_id = aws_vpc.network.id 67 | target_type = "instance" 68 | 69 | protocol = "TCP" 70 | port = 6443 71 | 72 | # TCP health check for apiserver 73 | health_check { 74 | protocol = "TCP" 75 | port = 6443 76 | 77 | # 
NLBs required to use same healthy and unhealthy thresholds 78 | healthy_threshold = 3 79 | unhealthy_threshold = 3 80 | 81 | # Interval between health checks required to be 10 or 30 82 | interval = 10 83 | } 84 | } 85 | 86 | # Attach controller instances to apiserver NLB 87 | resource "aws_lb_target_group_attachment" "controllers" { 88 | count = var.controller_count 89 | 90 | target_group_arn = aws_lb_target_group.controllers.arn 91 | target_id = aws_instance.controllers.*.id[count.index] 92 | port = 6443 93 | } 94 | 95 | -------------------------------------------------------------------------------- /aws/fedora-coreos/kubernetes/nlb.tf: -------------------------------------------------------------------------------- 1 | # Network Load Balancer DNS Record 2 | resource "aws_route53_record" "apiserver" { 3 | zone_id = var.dns_zone_id 4 | 5 | name = format("%s.%s.", var.cluster_name, var.dns_zone) 6 | type = "A" 7 | 8 | # AWS recommends their special "alias" records for NLBs 9 | alias { 10 | name = aws_lb.nlb.dns_name 11 | zone_id = aws_lb.nlb.zone_id 12 | evaluate_target_health = true 13 | } 14 | } 15 | 16 | # Network Load Balancer for apiservers and ingress 17 | resource "aws_lb" "nlb" { 18 | name = "${var.cluster_name}-nlb" 19 | load_balancer_type = "network" 20 | internal = false 21 | 22 | subnets = aws_subnet.public.*.id 23 | 24 | enable_cross_zone_load_balancing = true 25 | } 26 | 27 | # Forward TCP apiserver traffic to controllers 28 | resource "aws_lb_listener" "apiserver-https" { 29 | load_balancer_arn = aws_lb.nlb.arn 30 | protocol = "TCP" 31 | port = "6443" 32 | 33 | default_action { 34 | type = "forward" 35 | target_group_arn = aws_lb_target_group.controllers.arn 36 | } 37 | } 38 | 39 | # Forward HTTP ingress traffic to workers 40 | resource "aws_lb_listener" "ingress-http" { 41 | load_balancer_arn = aws_lb.nlb.arn 42 | protocol = "TCP" 43 | port = 80 44 | 45 | default_action { 46 | type = "forward" 47 | target_group_arn = module.workers.target_group_http 48 | } 49 | } 50 | 51 | # Forward HTTPS ingress traffic to workers 52 | resource "aws_lb_listener" "ingress-https" { 53 | load_balancer_arn = aws_lb.nlb.arn 54 | protocol = "TCP" 55 | port = 443 56 | 57 | default_action { 58 | type = "forward" 59 | target_group_arn = module.workers.target_group_https 60 | } 61 | } 62 | 63 | # Target group of controllers 64 | resource "aws_lb_target_group" "controllers" { 65 | name = "${var.cluster_name}-controllers" 66 | vpc_id = aws_vpc.network.id 67 | target_type = "instance" 68 | 69 | protocol = "TCP" 70 | port = 6443 71 | 72 | # TCP health check for apiserver 73 | health_check { 74 | protocol = "TCP" 75 | port = 6443 76 | 77 | # NLBs required to use same healthy and unhealthy thresholds 78 | healthy_threshold = 3 79 | unhealthy_threshold = 3 80 | 81 | # Interval between health checks required to be 10 or 30 82 | interval = 10 83 | } 84 | } 85 | 86 | # Attach controller instances to apiserver NLB 87 | resource "aws_lb_target_group_attachment" "controllers" { 88 | count = var.controller_count 89 | 90 | target_group_arn = aws_lb_target_group.controllers.arn 91 | target_id = aws_instance.controllers.*.id[count.index] 92 | port = 6443 93 | } 94 | 95 | -------------------------------------------------------------------------------- /azure/fedora-coreos/kubernetes/workers/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | description = "Unique name for the worker pool" 4 | } 5 | 6 | # Azure 7 | 8 | variable 
"region" { 9 | type = string 10 | description = "Must be set to the Azure Region of cluster" 11 | } 12 | 13 | variable "resource_group_name" { 14 | type = string 15 | description = "Must be set to the resource group name of cluster" 16 | } 17 | 18 | variable "subnet_id" { 19 | type = string 20 | description = "Must be set to the `worker_subnet_id` output by cluster" 21 | } 22 | 23 | variable "security_group_id" { 24 | type = string 25 | description = "Must be set to the `worker_security_group_id` output by cluster" 26 | } 27 | 28 | variable "backend_address_pool_id" { 29 | type = string 30 | description = "Must be set to the `worker_backend_address_pool_id` output by cluster" 31 | } 32 | 33 | # instances 34 | 35 | variable "worker_count" { 36 | type = number 37 | description = "Number of instances" 38 | default = 1 39 | } 40 | 41 | variable "vm_type" { 42 | type = string 43 | description = "Machine type for instances (see `az vm list-skus --location centralus`)" 44 | default = "Standard_DS1_v2" 45 | } 46 | 47 | variable "os_image" { 48 | type = string 49 | description = "Fedora CoreOS image for instances" 50 | } 51 | 52 | variable "priority" { 53 | type = string 54 | description = "Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time." 55 | default = "Regular" 56 | } 57 | 58 | variable "snippets" { 59 | type = list(string) 60 | description = "Fedora CoreOS Config snippets" 61 | default = [] 62 | } 63 | 64 | # configuration 65 | 66 | variable "kubeconfig" { 67 | type = string 68 | description = "Must be set to `kubeconfig` output by cluster" 69 | } 70 | 71 | variable "ssh_authorized_key" { 72 | type = string 73 | description = "SSH public key for user 'core'" 74 | } 75 | 76 | variable "service_cidr" { 77 | type = string 78 | description = < 0 ? var.spot_price : null 50 | enable_monitoring = false 51 | 52 | user_data = data.ct_config.worker-ignition.rendered 53 | 54 | # storage 55 | root_block_device { 56 | volume_type = var.disk_type 57 | volume_size = var.disk_size 58 | iops = var.disk_iops 59 | encrypted = true 60 | } 61 | 62 | # network 63 | security_groups = var.security_groups 64 | 65 | lifecycle { 66 | // Override the default destroy and replace update behavior 67 | create_before_destroy = true 68 | ignore_changes = [image_id] 69 | } 70 | } 71 | 72 | # Worker Ignition config 73 | data "ct_config" "worker-ignition" { 74 | content = data.template_file.worker-config.rendered 75 | strict = true 76 | snippets = var.snippets 77 | } 78 | 79 | # Worker Fedora CoreOS config 80 | data "template_file" "worker-config" { 81 | template = file("${path.module}/fcc/worker.yaml") 82 | 83 | vars = { 84 | kubeconfig = indent(10, var.kubeconfig) 85 | ssh_authorized_key = var.ssh_authorized_key 86 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 87 | cluster_domain_suffix = var.cluster_domain_suffix 88 | node_labels = join(",", var.node_labels) 89 | } 90 | } 91 | 92 | -------------------------------------------------------------------------------- /bare-metal/fedora-coreos/kubernetes/ssh.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # format assets for distribution 3 | assets_bundle = [ 4 | # header with the unpack location 5 | for key, value in module.bootstrap.assets_dist : 6 | format("##### %s\n%s", key, value) 7 | ] 8 | } 9 | 10 | # Secure copy assets to controllers. 
Activates kubelet.service 11 | resource "null_resource" "copy-controller-secrets" { 12 | count = length(var.controllers) 13 | 14 | # Without depends_on, remote-exec could start and wait for machines before 15 | # matchbox groups are written, causing a deadlock. 16 | depends_on = [ 17 | matchbox_group.controller, 18 | matchbox_group.worker, 19 | module.bootstrap, 20 | ] 21 | 22 | connection { 23 | type = "ssh" 24 | host = var.controllers.*.domain[count.index] 25 | user = "core" 26 | timeout = "60m" 27 | } 28 | 29 | provisioner "file" { 30 | content = module.bootstrap.kubeconfig-kubelet 31 | destination = "$HOME/kubeconfig" 32 | } 33 | 34 | provisioner "file" { 35 | content = join("\n", local.assets_bundle) 36 | destination = "$HOME/assets" 37 | } 38 | 39 | provisioner "remote-exec" { 40 | inline = [ 41 | "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", 42 | "sudo /opt/bootstrap/layout", 43 | ] 44 | } 45 | } 46 | 47 | # Secure copy kubeconfig to all workers. Activates kubelet.service 48 | resource "null_resource" "copy-worker-secrets" { 49 | count = length(var.workers) 50 | 51 | # Without depends_on, remote-exec could start and wait for machines before 52 | # matchbox groups are written, causing a deadlock. 53 | depends_on = [ 54 | matchbox_group.controller, 55 | matchbox_group.worker, 56 | ] 57 | 58 | connection { 59 | type = "ssh" 60 | host = var.workers.*.domain[count.index] 61 | user = "core" 62 | timeout = "60m" 63 | } 64 | 65 | provisioner "file" { 66 | content = module.bootstrap.kubeconfig-kubelet 67 | destination = "$HOME/kubeconfig" 68 | } 69 | 70 | provisioner "remote-exec" { 71 | inline = [ 72 | "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig", 73 | ] 74 | } 75 | } 76 | 77 | # Connect to a controller to perform one-time cluster bootstrap. 78 | resource "null_resource" "bootstrap" { 79 | # Without depends_on, this remote-exec may start before the kubeconfig copy. 80 | # Terraform only does one task at a time, so it would try to bootstrap 81 | # while no Kubelets are running. 82 | depends_on = [ 83 | null_resource.copy-controller-secrets, 84 | null_resource.copy-worker-secrets, 85 | ] 86 | 87 | connection { 88 | type = "ssh" 89 | host = var.controllers[0].domain 90 | user = "core" 91 | timeout = "15m" 92 | } 93 | 94 | provisioner "remote-exec" { 95 | inline = [ 96 | "sudo systemctl start bootstrap", 97 | ] 98 | } 99 | } 100 | 101 | 102 | -------------------------------------------------------------------------------- /aws/container-linux/kubernetes/workers/workers.tf: -------------------------------------------------------------------------------- 1 | # Workers AutoScaling Group 2 | resource "aws_autoscaling_group" "workers" { 3 | name = "${var.name}-worker ${aws_launch_configuration.worker.name}" 4 | 5 | # count 6 | desired_capacity = var.worker_count 7 | min_size = var.worker_count 8 | max_size = var.worker_count + 2 9 | default_cooldown = 30 10 | health_check_grace_period = 30 11 | 12 | # network 13 | vpc_zone_identifier = var.subnet_ids 14 | 15 | # template 16 | launch_configuration = aws_launch_configuration.worker.name 17 | 18 | # target groups to which instances should be added 19 | target_group_arns = flatten([ 20 | aws_lb_target_group.workers-http.id, 21 | aws_lb_target_group.workers-https.id, 22 | var.target_groups, 23 | ]) 24 | 25 | lifecycle { 26 | # override the default destroy and replace update behavior 27 | create_before_destroy = true 28 | } 29 | 30 | # Waiting for instance creation delays adding the ASG to state. 
If instances 31 | # can't be created (e.g. spot price too low), the ASG will be orphaned. 32 | # Orphaned ASGs escape cleanup, can't be updated, and keep bidding if spot is 33 | # used. Disable wait to avoid issues and align with other clouds. 34 | wait_for_capacity_timeout = "0" 35 | 36 | tags = [ 37 | { 38 | key = "Name" 39 | value = "${var.name}-worker" 40 | propagate_at_launch = true 41 | }, 42 | ] 43 | } 44 | 45 | # Worker template 46 | resource "aws_launch_configuration" "worker" { 47 | image_id = local.ami_id 48 | instance_type = var.instance_type 49 | spot_price = var.spot_price > 0 ? var.spot_price : null 50 | enable_monitoring = false 51 | 52 | user_data = data.ct_config.worker-ignition.rendered 53 | 54 | # storage 55 | root_block_device { 56 | volume_type = var.disk_type 57 | volume_size = var.disk_size 58 | iops = var.disk_iops 59 | encrypted = true 60 | } 61 | 62 | # network 63 | security_groups = var.security_groups 64 | 65 | lifecycle { 66 | // Override the default destroy and replace update behavior 67 | create_before_destroy = true 68 | ignore_changes = [image_id] 69 | } 70 | } 71 | 72 | # Worker Ignition config 73 | data "ct_config" "worker-ignition" { 74 | content = data.template_file.worker-config.rendered 75 | strict = true 76 | snippets = var.snippets 77 | } 78 | 79 | # Worker Container Linux config 80 | data "template_file" "worker-config" { 81 | template = file("${path.module}/cl/worker.yaml") 82 | 83 | vars = { 84 | kubeconfig = indent(10, var.kubeconfig) 85 | ssh_authorized_key = var.ssh_authorized_key 86 | cluster_dns_service_ip = cidrhost(var.service_cidr, 10) 87 | cluster_domain_suffix = var.cluster_domain_suffix 88 | cgroup_driver = local.flavor == "flatcar" && local.channel == "edge" ? "systemd" : "cgroupfs" 89 | node_labels = join(",", var.node_labels) 90 | } 91 | } 92 | 93 | -------------------------------------------------------------------------------- /azure/container-linux/kubernetes/workers/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | description = "Unique name for the worker pool" 4 | } 5 | 6 | # Azure 7 | 8 | variable "region" { 9 | type = string 10 | description = "Must be set to the Azure Region of cluster" 11 | } 12 | 13 | variable "resource_group_name" { 14 | type = string 15 | description = "Must be set to the resource group name of cluster" 16 | } 17 | 18 | variable "subnet_id" { 19 | type = string 20 | description = "Must be set to the `worker_subnet_id` output by cluster" 21 | } 22 | 23 | variable "security_group_id" { 24 | type = string 25 | description = "Must be set to the `worker_security_group_id` output by cluster" 26 | } 27 | 28 | variable "backend_address_pool_id" { 29 | type = string 30 | description = "Must be set to the `worker_backend_address_pool_id` output by cluster" 31 | } 32 | 33 | # instances 34 | 35 | variable "worker_count" { 36 | type = number 37 | description = "Number of instances" 38 | default = 1 39 | } 40 | 41 | variable "vm_type" { 42 | type = string 43 | description = "Machine type for instances (see `az vm list-skus --location centralus`)" 44 | default = "Standard_DS1_v2" 45 | } 46 | 47 | variable "os_image" { 48 | type = string 49 | description = "Channel for a Container Linux derivative (flatcar-stable, flatcar-beta, flatcar-alpha, flatcar-edge, coreos-stable, coreos-beta, coreos-alpha)" 50 | default = "flatcar-stable" 51 | } 52 | 53 | variable "priority" { 54 | type = string 55 | description = "Set priority to Spot to 
use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time." 56 | default = "Regular" 57 | } 58 | 59 | variable "snippets" { 60 | type = list(string) 61 | description = "Container Linux Config snippets" 62 | default = [] 63 | } 64 | 65 | # configuration 66 | 67 | variable "kubeconfig" { 68 | type = string 69 | description = "Must be set to `kubeconfig` output by cluster" 70 | } 71 | 72 | variable "ssh_authorized_key" { 73 | type = string 74 | description = "SSH public key for user 'core'" 75 | } 76 | 77 | variable "service_cidr" { 78 | type = string 79 | description = <