├── .github
└── workflows
│ └── main.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── Makefile
├── README.md
├── assets
└── favicon.png
├── aws
├── _modules
│ └── eks
│ │ ├── aws_iam_authenticator.tf
│ │ ├── ingress.tf
│ │ ├── kubeconfig.tf
│ │ ├── main.tf
│ │ ├── master.tf
│ │ ├── node_pool.tf
│ │ ├── node_pool
│ │ ├── launch_template.tf
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ │ ├── openid_connect.tf
│ │ ├── outputs.tf
│ │ ├── roles_master.tf
│ │ ├── roles_worker.tf
│ │ ├── sg_masters.tf
│ │ ├── templates
│ │ └── kubeconfig.tpl
│ │ ├── variables.tf
│ │ ├── versions.tf
│ │ └── vpc.tf
├── cluster-local
│ ├── configuration.tf
│ ├── elb-dns
│ │ ├── variables.tf
│ │ └── versions.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
└── cluster
│ ├── configuration.tf
│ ├── elb-dns
│ ├── ingress.tf
│ ├── variables.tf
│ └── versions.tf
│ ├── main.tf
│ ├── node-pool
│ ├── configuration.tf
│ ├── data_sources.tf
│ ├── main.tf
│ ├── variables.tf
│ ├── versions.tf
│ └── vpc.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── azurerm
├── _modules
│ └── aks
│ │ ├── ingress.tf
│ │ ├── kubeconfig.tf
│ │ ├── log_analytics.tf
│ │ ├── main.tf
│ │ ├── node_pool
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ ├── service_principal.tf
│ │ ├── templates
│ │ └── kubeconfig.tpl
│ │ ├── variables.tf
│ │ ├── versions.tf
│ │ └── vnet.tf
├── cluster-local
│ ├── configuration.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
└── cluster
│ ├── configuration.tf
│ ├── main.tf
│ ├── node-pool
│ ├── configuration.tf
│ ├── main.tf
│ ├── variables.tf
│ └── versions.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── common
├── configuration
│ ├── outputs.tf
│ ├── tests
│ │ ├── custom_envs
│ │ │ └── test_envs.tf
│ │ ├── default_envs
│ │ │ └── test_envs.tf
│ │ └── non_string_values
│ │ │ ├── test_hash.tf
│ │ │ ├── test_list.tf
│ │ │ ├── test_object.tf
│ │ │ ├── versions.tf
│ │ │ └── wrapper
│ │ │ └── main.tf
│ ├── variables.tf
│ └── versions.tf
└── metadata
│ ├── main.tf
│ ├── outputs.tf
│ ├── tests
│ ├── custom_delimiter
│ │ └── test.tf
│ ├── custom_label_namespace
│ │ └── test.tf
│ ├── custom_workspace
│ │ └── test.tf
│ └── defaults
│ │ └── test.tf
│ ├── variables.tf
│ └── versions.tf
├── google
├── _modules
│ └── gke
│ │ ├── cluster.tf
│ │ ├── cluster_role_binding.tf
│ │ ├── ingress.tf
│ │ ├── kubeconfig.tf
│ │ ├── network.tf
│ │ ├── node_pool.tf
│ │ ├── node_pool
│ │ ├── locals.tf
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── service_account.tf
│ │ ├── variables.tf
│ │ └── versions.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ ├── service_account.tf
│ │ ├── templates
│ │ └── kubeconfig.tpl
│ │ ├── variables.tf
│ │ └── versions.tf
├── cluster-local
│ ├── configuration.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
└── cluster
│ ├── configuration.tf
│ ├── main.tf
│ ├── node-pool
│ ├── configuration.tf
│ ├── main.tf
│ ├── variables.tf
│ └── versions.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── kind
├── _modules
│ └── kind
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── versions.tf
└── cluster
│ ├── configuration.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── oci
├── Dockerfile
├── entrypoint
└── entrypoint_user
├── quickstart
├── build_artifacts
│ ├── Pipfile
│ ├── Pipfile.lock
│ └── dist.py
└── src
│ ├── configurations
│ ├── _shared
│ │ ├── .user
│ │ │ └── README.md
│ │ ├── README.md
│ │ └── tpl_gitignore
│ ├── aks
│ │ ├── .gitignore
│ │ ├── .user
│ │ ├── Dockerfile
│ │ ├── Dockerfile.loc
│ │ ├── README.md
│ │ ├── aks_zero_cluster.tf
│ │ ├── aks_zero_providers.tf
│ │ └── versions.tf
│ ├── eks
│ │ ├── .gitignore
│ │ ├── .user
│ │ ├── Dockerfile
│ │ ├── Dockerfile.loc
│ │ ├── README.md
│ │ ├── eks_zero_cluster.tf
│ │ ├── eks_zero_providers.tf
│ │ └── versions.tf
│ ├── gke
│ │ ├── .gitignore
│ │ ├── .user
│ │ ├── Dockerfile
│ │ ├── Dockerfile.loc
│ │ ├── README.md
│ │ ├── gke_zero_cluster.tf
│ │ ├── gke_zero_providers.tf
│ │ └── versions.tf
│ ├── kind
│ │ ├── .gitignore
│ │ ├── .user
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── kind_zero_cluster.tf
│ │ ├── kind_zero_providers.tf
│ │ └── versions.tf
│ └── multi-cloud
│ │ ├── .gitignore
│ │ ├── .user
│ │ ├── Dockerfile
│ │ ├── Dockerfile.loc
│ │ ├── README.md
│ │ ├── aks_zero_cluster.tf
│ │ ├── aks_zero_providers.tf
│ │ ├── eks_zero_cluster.tf
│ │ ├── eks_zero_providers.tf
│ │ ├── gke_zero_cluster.tf
│ │ ├── gke_zero_providers.tf
│ │ └── versions.tf
│ └── manifests
│ └── .gitempty
└── tests
├── .gitignore
├── aks_zero_cluster.tf
├── aks_zero_node_pools.tf
├── aks_zero_providers.tf
├── ci-cd
├── eks_zero_cluster.tf
├── eks_zero_node_pools.tf
├── eks_zero_providers.tf
├── gke_zero_cluster.tf
├── gke_zero_node_pools.tf
├── gke_zero_providers.tf
├── state.tf
└── versions.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Ignore build and dist
9 | _build/
10 | _dist/
11 |
12 | # No version locking for tests,
13 | # we want them to blow up in case
14 | # of issues
15 | tests/.terraform.lock.hcl
16 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at hello@kubestack.com. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
Please use common sense when creating issues, and follow [the GitHub flow](https://guides.github.com/introduction/flow/) — fork the repository, then open a merge request — when contributing changes.
4 |
5 | ## Issues
6 |
7 | Issues are very valuable to this project.
8 |
9 | - Ideas are a valuable source of contributions others can make
10 | - Problems show where this project is lacking
11 | - With a question, you show where contributors can improve the user experience
12 |
13 | Thank you for creating them.
14 |
15 | ## Merge Requests
16 |
17 | Merge requests are a great way to get your ideas into this repository.
18 |
19 | When deciding if I merge in a merge request I look at the following things:
20 |
21 | ### Does it state intent
22 |
23 | You should be clear about which problem you're trying to solve with your
24 | contribution.
25 |
26 | For example:
27 |
28 | > Add link to code of conduct in README.md
29 |
30 | Doesn't tell me anything about why you're doing that
31 |
32 | > Add link to code of conduct in README.md because users don't always look in the CONTRIBUTING.md
33 |
34 | Tells me the problem that you have found, and the merge request shows me the action you have taken to solve it.
35 |
36 | ### Is it of good quality
37 |
38 | - There are no spelling mistakes
39 | - It reads well
- For English language contributions: Has a good score on [Grammarly](https://www.grammarly.com) or [Hemingway Editor](http://www.hemingwayapp.com/)
41 |
42 | ### Does it move this repository closer to my vision for the repository
43 |
44 | The aim of this repository is documented at [README](README.md).
45 |
46 | ### Does it follow the contributor covenant
47 |
48 | This repository has a [code of conduct](./CODE_OF_CONDUCT.md), I will remove things that do not respect it.
49 |
50 | The origin of this document is from [PurpleBooth Templates](https://github.com/PurpleBooth/a-good-readme-template).
51 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Build, test and release automation for the Kubestack framework images.
# The default target builds the quickstart dist artifacts and the image.
_all: dist build

# None of these targets produce a file named after the target, so declare
# them phony to ensure they always run, even if a matching file or
# directory exists in the repository.
.PHONY: _all dist build validate test cleanup shell
.PHONY: .check-container .run-container .stop-container .init

# Git ref and commit of the current checkout, overridable from the
# environment (e.g. in CI).
GIT_REF ?= $(shell echo "refs/heads/"`git rev-parse --abbrev-ref HEAD`)
GIT_SHA ?= $(shell echo `git rev-parse --verify HEAD^{commit}`)

DOCKER_PUSH ?= false
DOCKER_TARGET ?= multi-cloud

# When pushing, build multi-arch images and write both the build cache and
# the image to the registry; otherwise build for the local platform only
# and load the image into the local Docker daemon.
ifeq ("${DOCKER_PUSH}", "true")
BUILD_PLATFORM := --platform linux/arm64,linux/amd64
BUILD_CACHE_DIST := --cache-to type=registry,mode=max,ref=ghcr.io/kbst/terraform-kubestack/dev:buildcache-dist-helper,push=${DOCKER_PUSH}
BUILD_OUTPUT := --output type=registry,push=${DOCKER_PUSH}
BUILD_CACHE := --cache-to type=registry,mode=max,ref=ghcr.io/kbst/terraform-kubestack/dev:buildcache-${DOCKER_TARGET},push=${DOCKER_PUSH}
else
BUILD_PLATFORM :=
BUILD_OUTPUT := --output type=docker
endif

# Build the quickstart dist artifacts inside the dist-helper image, then
# copy them out of a temporary container into quickstart/_dist.
dist:
	rm -rf quickstart/_dist

	docker buildx build \
		--build-arg GIT_REF=${GIT_REF} \
		--build-arg GIT_SHA=${GIT_SHA} \
		--file oci/Dockerfile \
		--output type=docker \
		--cache-from type=registry,ref=ghcr.io/kbst/terraform-kubestack/dev:buildcache-dist-helper \
		${BUILD_CACHE_DIST} \
		--progress plain \
		-t dist-helper:latest \
		--target dist-helper \
		.

	docker run \
		--detach \
		--name dist-helper \
		--rm dist-helper:latest \
		sleep 600

	docker cp dist-helper:/quickstart/_dist quickstart/_dist
	docker stop dist-helper

# Build the image for DOCKER_TARGET, tagged with the current commit.
build:
	docker buildx build \
		${BUILD_PLATFORM} \
		--build-arg GIT_REF=${GIT_REF} \
		--build-arg GIT_SHA=${GIT_SHA} \
		--file oci/Dockerfile \
		${BUILD_OUTPUT} \
		--cache-from type=registry,ref=ghcr.io/kbst/terraform-kubestack/dev:buildcache-${DOCKER_TARGET} \
		${BUILD_CACHE} \
		--progress plain \
		--target ${DOCKER_TARGET} \
		-t ghcr.io/kbst/terraform-kubestack/dev:test-$(GIT_SHA)-${DOCKER_TARGET} \
		.

# Run terraform validate inside the initialized test container.
validate: .init
	docker exec \
		test-container-$(GIT_SHA) \
		entrypoint terraform validate

# Apply the test configuration in stages: clusters first, then the
# ingress dependent module, then everything else.
test: validate
	docker exec \
		test-container-$(GIT_SHA) \
		entrypoint terraform apply --target module.aks_zero --target module.eks_zero --target module.gke_zero --input=false --auto-approve
	docker exec \
		test-container-$(GIT_SHA) \
		entrypoint terraform apply --target module.eks_zero_nginx --input=false --auto-approve
	docker exec \
		test-container-$(GIT_SHA) \
		entrypoint terraform apply --input=false --auto-approve

# Destroy all test infrastructure and stop the test container.
cleanup: .init
	docker exec \
		-ti \
		test-container-$(GIT_SHA) \
		entrypoint terraform destroy --input=false --auto-approve
	$(MAKE) .stop-container

# Open an interactive shell inside the test container.
shell: .check-container
	docker exec \
		-ti \
		test-container-$(GIT_SHA) \
		entrypoint bash

# Start the test container if it is not already running.
.check-container:
	docker inspect test-container-${GIT_SHA} > /dev/null || $(MAKE) .run-container

# Run the freshly built image as a long-lived test container with the
# repository mounted and cloud credentials passed through.
.run-container: build
	docker run \
		--detach \
		--name test-container-${GIT_SHA} \
		--rm \
		-v `pwd`:/infra:z \
		-e KBST_AUTH_AWS \
		-e KBST_AUTH_AZ \
		-e KBST_AUTH_GCLOUD \
		-e HOME=/infra/tests/.user \
		--workdir /infra/tests \
		ghcr.io/kbst/terraform-kubestack/dev:test-$(GIT_SHA)-${DOCKER_TARGET} \
		sleep infinity

.stop-container:
	docker stop test-container-${GIT_SHA} || true

# Initialize terraform and select the ops workspace inside the container.
.init: .check-container
	docker exec \
		test-container-$(GIT_SHA) \
		entrypoint terraform init
	docker exec \
		test-container-$(GIT_SHA) \
		entrypoint terraform workspace select ops
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Kubestack
6 | The Open Source Terraform framework for Kubernetes Platform Engineering
7 |
8 |
9 |
10 | []()
11 | [](https://github.com/kbst/terraform-kubestack/issues)
12 | [](https://github.com/kbst/terraform-kubestack/pulls)
13 |
14 |
15 |
16 |
17 |
18 | 
19 | 
20 |
21 |
22 |
23 |
24 |
25 |
26 |
33 |
34 | ## Introduction
35 |
36 | [Kubestack is a Terraform framework for Kubernetes Platform Engineering](https://www.kubestack.com) teams to define the entire cloud native stack in one Terraform code base and continuously evolve the platform safely through GitOps.
37 |
38 | ### Highlights
39 |
40 | * [Convention over configuration platform engineering framework](https://www.kubestack.com/framework/documentation/) that makes the power of platforms accessible to your whole engineering team
41 | * [Platform architecture](https://www.kubestack.com/framework/documentation/platform-architecture/) and [GitOps workflow](https://www.kubestack.com/framework/documentation/gitops-process/) enabling all team members to safely iterate while protecting your application environments
42 | * [Extendable, future-proof, low-maintenance Terraform code base](https://www.kubestack.com/framework/documentation/extending-kubestack/) and robust automation even for complex Kubernetes platforms
43 |
44 | ## Getting Started
45 |
46 | For the easiest way to get started, [follow the Kubestack tutorial](https://www.kubestack.com/framework/tutorial/).
47 | The tutorial will help you get started with the Kubestack framework and build a Kubernetes platform application teams love.
48 |
49 | ## Getting Help
50 |
51 | **Official Documentation**
52 | Refer to the [official documentation](https://www.kubestack.com/framework/documentation) for a deeper dive into how to use and configure Kubestack.
53 |
54 | **Community Help**
If you have any questions while following the tutorial, join the [#kubestack](https://app.slack.com/client/T09NY5SBT/CMBCT7XRQ) channel on the Kubernetes community Slack. To create an account, request an [invitation](https://slack.k8s.io/).
56 |
57 | ## Contributing
58 |
59 | This repository holds Terraform modules in directories matching the respective provider name, e.g. [`aws`](./aws), [`azurerm`](./azurerm), [`google`](./google). Additionally [`common`](./common) holds the modules that are used for all providers.
60 | Most notably the [`metadata`](./common/metadata) module that ensures a consistent naming scheme and the `cluster_services` module which integrates Kustomize into the Terraform apply.
61 |
62 | Each cloud provider specific module directory always has a `cluster` and `_modules` directories.
63 | The cluster module is user facing and once Kubestack is out of beta the goal is to not change the module interface unless the major version changes.
64 | The cluster module then internally uses the module in `_modules` that holds the actual implementation.
65 |
66 | The [`quickstart`](./quickstart) directory is home to the source for the zip files that are used to bootstrap the user repositories when following the tutorial.
67 |
68 | The [`tests`](./tests) directory holds a set of happy path tests.
69 |
70 | Contributions to the Kubestack framework are welcome and encouraged. Before contributing, please read the [Contributing](./CONTRIBUTING.md) and [Code of Conduct](./CODE_OF_CONDUCT.md) Guidelines.
71 |
72 | One super simple way to contribute to the success of this project is to give it a star.
73 |
74 |
75 |
76 | 
77 |
78 |
79 |
80 | ## Kubestack Repositories
81 |
82 | * [kbst/terraform-kubestack](https://github.com/kbst/terraform-kubestack) (this repository)
83 | * [Terraform framework for Kubernetes Platform Engineering](https://www.kubestack.com/) teams - Define your entire cloud native Kubernetes stack in one Terraform code base and continuously evolve the platform safely through GitOps.
84 | * [kbst/kbst](https://github.com/kbst/kbst)
85 | * Kubestack CLI `kbst` - The CLI helps you scaffold the Terraform code that defines the clusters, node pools or services of your platform. The CLI works on local files only, you can see any change it makes with git status.
86 | * [kbst/terraform-provider-kustomization](https://github.com/kbst/terraform-provider-kustomization)
87 | * Kustomize Terraform Provider - A Kubestack maintained Terraform provider for Kustomize, available in the [Terraform registry](https://registry.terraform.io/providers/kbst/kustomization/latest).
88 | * [kbst/catalog](https://github.com/kbst/catalog)
89 | * Catalog of Terraform modules for Kubernetes platforms - The [Kubestack Terraform modules](https://www.kubestack.com/catalog/) make it easy for platform engineering teams to deliver common platform features in production ready configurations.
90 |
--------------------------------------------------------------------------------
/assets/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kbst/terraform-kubestack/dad56a5606bb0367ca8fb3808803e037565a2389/assets/favicon.png
--------------------------------------------------------------------------------
/aws/_modules/eks/aws_iam_authenticator.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_config_map" "current" {
2 | metadata {
3 | name = "aws-auth"
4 | namespace = "kube-system"
5 | }
6 |
7 | data = {
8 | mapRoles = < 0
10 | ami_name = local.is_gpu ? "amazon-linux-2-gpu" : local.cpu_ami_name
11 | ami_release_prefix = local.is_gpu ? "amazon-eks-gpu-node" : "amazon-eks-node"
12 | ami_release_date = var.ami_release_version == null ? "" : split("-", var.ami_release_version)[1]
13 | ami_release_name = var.ami_release_version == null ? "recommended" : "${local.ami_release_prefix}-${var.kubernetes_version}-v${local.ami_release_date}"
14 | }
15 |
16 | data "aws_ssm_parameter" "eks_ami_release_version" {
17 | count = var.disk_size != null ? 1 : 0
18 |
19 | name = "/aws/service/eks/optimized-ami/${var.kubernetes_version}/${local.ami_name}/${local.ami_release_name}/image_id"
20 | }
21 |
22 | data "aws_ami" "eks_optimized" {
23 | count = var.disk_size != null ? 1 : 0
24 |
25 | owners = ["amazon"]
26 |
27 | filter {
28 | name = "image-id"
29 | values = [nonsensitive(data.aws_ssm_parameter.eks_ami_release_version[0].value)]
30 | }
31 | }
32 |
33 | resource "aws_launch_template" "current" {
34 | count = local.create_launch_template ? 1 : 0
35 |
36 | name = local.launch_template_name
37 |
38 | tags = merge(var.tags, var.eks_metadata_tags)
39 |
40 | dynamic "block_device_mappings" {
41 | for_each = var.disk_size != null ? toset([1]) : toset([])
42 |
43 | content {
44 | device_name = data.aws_ami.eks_optimized[0].root_device_name
45 |
46 | ebs {
47 | volume_size = var.disk_size
48 | }
49 | }
50 | }
51 |
52 | dynamic "metadata_options" {
53 | for_each = var.metadata_options != null ? toset([1]) : toset([])
54 |
55 | content {
56 | http_endpoint = var.metadata_options.http_endpoint
57 | http_tokens = var.metadata_options.http_tokens
58 | http_put_response_hop_limit = var.metadata_options.http_put_response_hop_limit
59 | http_protocol_ipv6 = var.metadata_options.http_protocol_ipv6
60 | instance_metadata_tags = var.metadata_options.instance_metadata_tags
61 | }
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/aws/_modules/eks/node_pool/main.tf:
--------------------------------------------------------------------------------
# Looks up details of the first configured instance type; its architecture
# and GPU support determine the default AMI type for the node group.
data "aws_ec2_instance_type" "current" {
  # ami_type is always determined by the first instance_type in the list
  instance_type = element(tolist(var.instance_types), 0)
}

locals {
  # arm64 instance types need the ARM AMI variant, everything else x86_64
  cpu_ami_type = data.aws_ec2_instance_type.current.supported_architectures[0] == "arm64" ? "AL2_ARM_64" : "AL2_x86_64"
  # instance types with GPUs require the GPU enabled AMI variant
  ami_type = length(data.aws_ec2_instance_type.current.gpus) > 0 ? "AL2_x86_64_GPU" : local.cpu_ami_type
}

# EKS managed node group for this node pool.
resource "aws_eks_node_group" "nodes" {
  cluster_name    = var.cluster_name
  node_group_name = var.node_group_name
  node_role_arn   = var.role_arn
  subnet_ids      = var.subnet_ids

  scaling_config {
    desired_size = var.desired_size
    max_size     = var.max_size
    min_size     = var.min_size
  }

  version = var.kubernetes_version
  # caller provided ami_type takes precedence over the detected default
  ami_type        = var.ami_type == null ? local.ami_type : var.ami_type
  release_version = var.ami_release_version
  instance_types  = var.instance_types

  # disk_size conflicts with a launch template; when a launch template is
  # used the volume size is set there instead
  # NOTE(review): local.create_launch_template is defined in the sibling
  # launch_template.tf of this module — confirm when editing in isolation
  disk_size = local.create_launch_template ? null : var.disk_size

  dynamic "launch_template" {
    for_each = local.create_launch_template ? toset([1]) : toset([])

    content {
      id      = aws_launch_template.current[0].id
      version = aws_launch_template.current[0].latest_version
    }
  }

  tags   = merge(var.tags, var.eks_metadata_tags)
  labels = merge(var.labels, var.metadata_labels)

  dynamic "taint" {
    for_each = var.taints

    content {
      key    = taint.value["key"]
      value  = taint.value["value"]
      effect = taint.value["effect"]
    }
  }

  # ensure the aws-auth configmap exists before nodes try to join
  depends_on = [var.depends-on-aws-auth]

  # when autoscaler is enabled, desired_size needs to be ignored
  # better would be to handle this in the resource and not require
  # desired_size, min_size and max_size in scaling_config
  lifecycle {
    ignore_changes = [scaling_config[0].desired_size]
  }
}
--------------------------------------------------------------------------------
/aws/_modules/eks/node_pool/variables.tf:
--------------------------------------------------------------------------------
# Input variables for the EKS node pool module.

variable "metadata_labels" {
  type        = map(string)
  description = "Metadata labels to use."
}

variable "eks_metadata_tags" {
  type        = map(any)
  description = "EKS metadata tags to use."
}

variable "cluster_name" {
  type        = string
  description = "Cluster name of the EKS cluster."
}

variable "node_group_name" {
  type        = string
  description = "Name for this node pool."
}

variable "role_arn" {
  type        = string
  description = "ARN of the IAM role for worker nodes."
}

variable "instance_types" {
  type        = set(string)
  description = "Set of AWS instance types to use for nodes."
}

variable "desired_size" {
  type        = string
  description = "Desired number of worker nodes."
}

variable "max_size" {
  type        = string
  description = "Maximum number of worker nodes."
}

variable "min_size" {
  type        = string
  description = "Minimum number of worker nodes."
}

variable "disk_size" {
  type        = string
  default     = "20"
  description = "Will set the volume size of the root device"
}

variable "subnet_ids" {
  type        = list(string)
  description = "List of VPC subnet IDs to use for nodes."
}

variable "depends-on-aws-auth" {
  type        = map(string)
  description = "Used as a depends_on shim to first create the aws-auth configmap before creating the node_pool."
}

variable "taints" {
  type = set(object({
    key    = string
    value  = string
    effect = string
  }))
  description = "Kubernetes taints to set for node pool."
}

variable "tags" {
  type        = map(string)
  description = "AWS tags to set on the node pool. Merged with Kubestack default tags."
  default     = {}
}

variable "labels" {
  type        = map(string)
  description = "Kubernetes labels to set on the nodes created by the node pool. Merged with Kubestack default labels."
  default     = {}
}

variable "ami_type" {
  type        = string
  description = "AMI type to use for nodes of the node pool."
}

variable "ami_release_version" {
  type        = string
  description = "AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version"
  default     = null
}

variable "kubernetes_version" {
  type        = string
  description = "Kubernetes version to use for node pool."
}

variable "metadata_options" {
  description = "EC2 metadata service options."
  type = object({
    http_endpoint               = optional(string)
    http_tokens                 = optional(string)
    http_put_response_hop_limit = optional(number)
    http_protocol_ipv6          = optional(string)
    instance_metadata_tags      = optional(string)
  })
  # the launch template guards against null (var.metadata_options != null),
  # so make this variable optional instead of requiring callers to pass it
  default = null
}
--------------------------------------------------------------------------------
/aws/_modules/eks/node_pool/versions.tf:
--------------------------------------------------------------------------------

terraform {
  # this module uses optional() object type attributes in variables.tf
  # (variable "metadata_options"), which require Terraform 1.3 or later
  required_version = ">= 1.3"
}
--------------------------------------------------------------------------------
/aws/_modules/eks/openid_connect.tf:
--------------------------------------------------------------------------------
# OIDC identity provider for the cluster. Enables IAM roles for service
# accounts (IRSA). Creation can be disabled entirely via
# var.disable_openid_connect_provider.
data "tls_certificate" "current" {
  count = var.disable_openid_connect_provider == false ? 1 : 0

  # certificate chain of the cluster's OIDC issuer, used for the thumbprint
  url = aws_eks_cluster.current.identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "current" {
  count = var.disable_openid_connect_provider == false ? 1 : 0

  # use the partition specific DNS suffix so this also works outside the
  # default aws partition (e.g. aws-cn, aws-us-gov)
  client_id_list = ["sts.${data.aws_partition.current.dns_suffix}"]
  thumbprint_list = [data.tls_certificate.current[0].certificates[0].sha1_fingerprint]
  url = aws_eks_cluster.current.identity[0].oidc[0].issuer
}
--------------------------------------------------------------------------------
/aws/_modules/eks/outputs.tf:
--------------------------------------------------------------------------------
# Rendered kubeconfig for the cluster; local.kubeconfig is assembled
# elsewhere in this module.
output "kubeconfig" {
  # NOTE(review): a kubeconfig contains cluster credentials; consider
  # marking this output sensitive = true — verify downstream consumers first
  value = local.kubeconfig
}
--------------------------------------------------------------------------------
/aws/_modules/eks/roles_master.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "master" {
2 | name = "${var.metadata_name}-master"
3 |
4 | assume_role_policy = < t[1] if length(t) == 2 }
58 |
59 | cluster_desired_capacity = local.cfg["cluster_desired_capacity"]
60 |
61 | cluster_max_size = local.cfg["cluster_max_size"]
62 |
63 | cluster_min_size = local.cfg["cluster_min_size"]
64 |
65 | cluster_version = lookup(local.cfg, "cluster_version", null)
66 |
67 | worker_root_device_volume_size = lookup(local.cfg, "worker_root_device_volume_size", null)
68 | worker_root_device_encrypted = lookup(local.cfg, "worker_root_device_encrypted", null)
69 |
70 | worker_ami_release_version = lookup(local.cfg, "worker_ami_release_version", null)
71 |
72 | cluster_aws_auth_map_roles = lookup(local.cfg, "cluster_aws_auth_map_roles", "")
73 | cluster_aws_auth_map_users = lookup(local.cfg, "cluster_aws_auth_map_users", "")
74 | cluster_aws_auth_map_accounts = lookup(local.cfg, "cluster_aws_auth_map_accounts", "")
75 |
76 | disable_default_ingress = lookup(local.cfg, "disable_default_ingress", false)
77 |
78 | enabled_cluster_log_types_lookup = lookup(local.cfg, "enabled_cluster_log_types", "api,audit,authenticator,controllerManager,scheduler")
79 | enabled_cluster_log_types = compact(split(",", local.enabled_cluster_log_types_lookup))
80 |
81 | disable_openid_connect_provider = lookup(local.cfg, "disable_openid_connect_provider", false)
82 |
83 | cluster_endpoint_private_access = lookup(local.cfg, "cluster_endpoint_private_access", false)
84 | cluster_endpoint_public_access = lookup(local.cfg, "cluster_endpoint_public_access", true)
85 | cluster_public_access_cidrs_lookup = lookup(local.cfg, "cluster_public_access_cidrs", null)
86 | cluster_public_access_cidrs = local.cluster_public_access_cidrs_lookup == null ? null : split(",", local.cluster_public_access_cidrs_lookup)
87 | cluster_service_cidr = lookup(local.cfg, "cluster_service_cidr", null)
88 |
89 | cluster_encryption_key_arn = lookup(local.cfg, "cluster_encryption_key_arn", null)
90 | }
91 |
--------------------------------------------------------------------------------
/aws/cluster/elb-dns/ingress.tf:
--------------------------------------------------------------------------------
# Ingress service of the cluster; the hostname of its load balancer is
# used as the alias target for the Route53 records below.
data "kubernetes_service" "current" {
  metadata {
    name      = var.ingress_service_name
    namespace = var.ingress_service_namespace
  }
}

# Hosted zone matching the cluster's fully qualified domain name.
data "aws_route53_zone" "current" {
  name = "${var.metadata_fqdn}."
}

# Regional hosted zone id for classic ELBs.
data "aws_elb_hosted_zone_id" "current" {
  count = var.using_nlb ? 0 : 1
}

# this is a workaround as aws_elb_hosted_zone_id doesn't support NLBs
# ref: https://github.com/hashicorp/terraform-provider-aws/issues/7988
data "aws_lb" "current" {
  count = var.using_nlb ? 1 : 0

  # the NLB name is the first segment of its DNS hostname
  name = split("-", data.kubernetes_service.current.status[0].load_balancer[0].ingress[0].hostname)[0]
}

# Alias A record for the apex of the cluster domain, pointing at the
# ingress load balancer.
resource "aws_route53_record" "host" {
  zone_id = data.aws_route53_zone.current.zone_id
  name    = var.metadata_fqdn
  type    = "A"

  alias {
    name                   = data.kubernetes_service.current.status[0].load_balancer[0].ingress[0].hostname
    zone_id                = var.using_nlb ? data.aws_lb.current[0].zone_id : data.aws_elb_hosted_zone_id.current[0].id
    evaluate_target_health = true
  }
}

# Wildcard alias A record covering all subdomains of the cluster domain.
resource "aws_route53_record" "wildcard" {
  zone_id = data.aws_route53_zone.current.zone_id
  name    = "*.${var.metadata_fqdn}"
  type    = "A"

  alias {
    name                   = data.kubernetes_service.current.status[0].load_balancer[0].ingress[0].hostname
    zone_id                = var.using_nlb ? data.aws_lb.current[0].zone_id : data.aws_elb_hosted_zone_id.current[0].id
    evaluate_target_health = true
  }
}
--------------------------------------------------------------------------------
/aws/cluster/elb-dns/variables.tf:
--------------------------------------------------------------------------------
# Inputs for the elb-dns module, wired up by the calling cluster module.

variable "ingress_service_name" {
  type        = string
  description = "Metadata name of the ingress service."
}

variable "ingress_service_namespace" {
  type        = string
  description = "Metadata namespace of the ingress service."
}

variable "metadata_fqdn" {
  type        = string
  description = "Cluster module FQDN."
}

# Controls which alias zone id is used in ingress.tf (NLB lookup vs
# classic ELB hosted zone id).
variable "using_nlb" {
  type        = bool
  default     = false
  description = "Whether the ingress uses NLB or classic ELB."
}
21 |
--------------------------------------------------------------------------------
/aws/cluster/elb-dns/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the elb-dns module. Only the provider
# sources are declared here; provider configurations are inherited
# from the calling module.
terraform {
  required_version = ">= 0.13"

  required_providers {
    aws = {
      source = "hashicorp/aws"
    }

    kubernetes = {
      source = "hashicorp/kubernetes"
    }
  }
}
14 |
--------------------------------------------------------------------------------
/aws/cluster/main.tf:
--------------------------------------------------------------------------------
# Region of the configured AWS provider, used for the cluster metadata.
data "aws_region" "current" {
}

# Builds the canonical cluster name, FQDN and labels from the
# per-workspace name_prefix/base_domain configuration.
module "cluster_metadata" {
  source = "../../common/metadata"

  name_prefix = local.name_prefix
  base_domain = local.base_domain

  provider_name   = "aws"
  provider_region = data.aws_region.current.name
}

# EKS cluster, wired from the per-workspace locals parsed in
# configuration.tf and the metadata module above.
module "cluster" {
  source = "../_modules/eks"

  metadata_name   = module.cluster_metadata.name
  metadata_fqdn   = module.cluster_metadata.fqdn
  metadata_labels = module.cluster_metadata.labels

  availability_zones = local.cluster_availability_zones

  # VPC layout for control plane and node subnets
  vpc_cidr                      = local.cluster_vpc_cidr
  vpc_control_subnet_newbits    = local.cluster_vpc_control_subnet_newbits
  vpc_dns_hostnames             = local.cluster_vpc_dns_hostnames
  vpc_dns_support               = local.cluster_vpc_dns_support
  vpc_node_subnet_newbits       = local.cluster_vpc_node_subnet_newbits
  vpc_node_subnet_number_offset = local.cluster_vpc_node_subnet_number_offset
  vpc_legacy_node_subnets       = local.cluster_vpc_legacy_node_subnets
  vpc_subnet_map_public_ip      = local.cluster_vpc_subnet_map_public_ip

  # default node pool sizing
  instance_types   = local.cluster_instance_types
  desired_capacity = local.cluster_desired_capacity
  max_size         = local.cluster_max_size
  min_size         = local.cluster_min_size
  cluster_version  = local.cluster_version

  metadata_options = local.metadata_options

  root_device_encrypted   = local.worker_root_device_encrypted
  root_device_volume_size = local.worker_root_device_volume_size

  additional_node_tags = local.cluster_additional_node_tags

  # aws-auth ConfigMap entries
  aws_auth_map_roles    = local.cluster_aws_auth_map_roles
  aws_auth_map_users    = local.cluster_aws_auth_map_users
  aws_auth_map_accounts = local.cluster_aws_auth_map_accounts

  disable_default_ingress = local.disable_default_ingress

  enabled_cluster_log_types = local.enabled_cluster_log_types

  disable_openid_connect_provider = local.disable_openid_connect_provider

  # API server endpoint exposure
  cluster_endpoint_private_access = local.cluster_endpoint_private_access
  cluster_endpoint_public_access  = local.cluster_endpoint_public_access
  cluster_public_access_cidrs     = local.cluster_public_access_cidrs
  cluster_service_cidr            = local.cluster_service_cidr

  cluster_encryption_key_arn = local.cluster_encryption_key_arn

  worker_ami_release_version = local.worker_ami_release_version

  # cluster module configuration is still map(string)
  # once module_variable_optional_attrs isn't experimental anymore
  # we can migrate cluster module configuration to map(object(...))
  taints = toset([])
}
69 |
--------------------------------------------------------------------------------
/aws/cluster/node-pool/configuration.tf:
--------------------------------------------------------------------------------
module "configuration" {
  # pinned upstream ref: this module's configuration already uses
  # optional() object attributes — NOTE(review): presumably predates
  # local common/configuration support; confirm before switching to
  # the relative path used elsewhere
  source        = "github.com/kbst/terraform-kubestack//common/configuration?ref=v0.15.1-beta.1"
  configuration = var.configuration
  base_key      = var.configuration_base_key
}

locals {
  # per-workspace configuration, inheritance already merged
  # (direct index replaces the deprecated two-argument lookup())
  cfg = module.configuration.merged[terraform.workspace]

  name = local.cfg["name"]

  # instance_types is a comma separated string, normalized to a set
  instance_types_lookup = local.cfg["instance_types"] == null ? "" : local.cfg["instance_types"]
  instance_types        = toset(split(",", local.instance_types_lookup))

  # var.configuration is map(object(...)) with optional attributes, so
  # every key exists (null when unset) and can be indexed directly;
  # the deprecated lookup(map, key) form is no longer needed
  ami_release_version = local.cfg["ami_release_version"]
  desired_capacity    = local.cfg["desired_capacity"]
  min_size            = local.cfg["min_size"]
  max_size            = local.cfg["max_size"]
  disk_size           = local.cfg["disk_size"]

  ami_type = local.cfg["ami_type"]

  metadata_options = local.cfg["metadata_options"]

  # availability_zones is a comma separated string, normalized to a list
  availability_zones_lookup = local.cfg["availability_zones"] == null ? "" : local.cfg["availability_zones"]
  availability_zones        = compact(split(",", local.availability_zones_lookup))

  # prefer the AZ-filtered subnets when AZs are configured, otherwise
  # fall back to the subnets of the cluster's default node group
  az_subnet_ids            = length(data.aws_subnets.current) == 1 ? data.aws_subnets.current[0].ids : []
  default_subnet_ids       = length(data.aws_subnets.current) == 1 ? local.az_subnet_ids : tolist(data.aws_eks_node_group.default.subnet_ids)
  vpc_subnet_ids           = local.cfg["vpc_subnet_ids"] == null ? local.default_subnet_ids : split(",", local.cfg["vpc_subnet_ids"])
  vpc_secondary_cidr       = local.cfg["vpc_secondary_cidr"]
  vpc_subnet_newbits       = local.cfg["vpc_subnet_newbits"]
  vpc_subnet_number_offset = local.cfg["vpc_subnet_number_offset"] == null ? 1 : local.cfg["vpc_subnet_number_offset"]
  vpc_subnet_map_public_ip = local.cfg["vpc_subnet_map_public_ip"]

  taints = local.cfg["taints"] == null ? toset([]) : local.cfg["taints"]

  tags = local.cfg["tags"]

  labels = local.cfg["labels"]
}
41 |
--------------------------------------------------------------------------------
/aws/cluster/node-pool/data_sources.tf:
--------------------------------------------------------------------------------
# Cluster and default node group this pool attaches to; the pool
# inherits IAM role, subnets and labels from the default node group.
data "aws_eks_cluster" "current" {
  name = var.cluster_name
}

data "aws_eks_node_group" "default" {
  cluster_name    = data.aws_eks_cluster.current.name
  node_group_name = var.cluster_default_node_pool_name
}

# provider level default tags, used in main.tf to strip them from the
# tags inherited from the default node group
data "aws_default_tags" "current" {}

data "aws_vpc" "current" {
  id = data.aws_eks_cluster.current.vpc_config[0].vpc_id
}

# default node group subnets filtered down to the configured AZs;
# only queried when availability_zones is set
data "aws_subnets" "current" {
  count = length(local.availability_zones) > 0 ? 1 : 0

  filter {
    name   = "vpc-id"
    values = [data.aws_vpc.current.id]
  }

  # if the node pool is in one or more specific AZs
  # only link subnet_ids belonging to these AZs
  filter {
    name   = "availability-zone"
    values = local.availability_zones
  }

  # exclude control plane subnets
  filter {
    name   = "subnet-id"
    values = tolist(data.aws_eks_node_group.default.subnet_ids)
  }
}

# internet gateway of the VPC, used as default route in vpc.tf when
# this pool creates public subnets (vpc_subnet_newbits set and
# vpc_subnet_map_public_ip not false)
data "aws_internet_gateway" "current" {
  count = local.vpc_subnet_newbits == null ? 0 : local.vpc_subnet_map_public_ip == false ? 0 : 1

  filter {
    name   = "attachment.vpc-id"
    values = [data.aws_vpc.current.id]
  }
}

# per-AZ NAT gateways, used as default route in vpc.tf when this pool
# creates private subnets; matched by kubestack tags — presumably set
# by the cluster module, confirm against _modules/eks/vpc.tf
data "aws_nat_gateway" "current" {
  count = local.vpc_subnet_newbits == null ? 0 : local.vpc_subnet_map_public_ip == false ? length(local.availability_zones) : 0

  vpc_id = data.aws_vpc.current.id

  tags = {
    "kubestack.com/cluster_name"          = data.aws_eks_cluster.current.name
    "kubestack.com/cluster_provider_zone" = local.availability_zones[count.index]
  }
}
57 |
--------------------------------------------------------------------------------
/aws/cluster/node-pool/main.tf:
--------------------------------------------------------------------------------
locals {
  // if provider level tags are set, the node_group data source tags attr
  // includes the resource level and provider level tags
  // we have to exclude the provider level tags when setting them for node pools below
  node_group_tag_keys        = toset(keys(data.aws_eks_node_group.default.tags))
  provider_level_tag_keys    = toset(keys(data.aws_default_tags.current.tags))
  tags_without_all_tags_keys = setsubtract(local.node_group_tag_keys, local.provider_level_tag_keys)
  tags_without_all_tags      = { for k in local.tags_without_all_tags_keys : k => data.aws_eks_node_group.default.tags[k] }
}

// Additional node pool attached to an existing EKS cluster, inheriting
// IAM role, labels and tags from the cluster's default node group.
module "node_pool" {
  source = "../../_modules/eks/node_pool"

  cluster_name      = data.aws_eks_cluster.current.name
  metadata_labels   = data.aws_eks_node_group.default.labels
  eks_metadata_tags = local.tags_without_all_tags
  role_arn          = data.aws_eks_node_group.default.node_role_arn

  node_group_name = local.name

  // use pool-managed subnets (vpc.tf) when vpc_subnet_newbits is set,
  // otherwise the configured or inherited subnet ids
  subnet_ids = local.vpc_subnet_newbits == null ? local.vpc_subnet_ids : aws_subnet.current.*.id

  instance_types      = local.instance_types
  ami_release_version = local.ami_release_version
  desired_size        = local.desired_capacity
  max_size            = local.max_size
  min_size            = local.min_size

  ami_type = local.ami_type

  kubernetes_version = data.aws_eks_cluster.current.version

  disk_size = local.disk_size

  metadata_options = local.metadata_options

  taints = local.taints

  tags = local.tags

  labels = local.labels

  // NOTE(review): looks like an ordering handle consumed by the
  // node_pool module when cluster and pool are created together;
  // set to null here since this pool attaches to an existing
  // cluster — confirm against _modules/eks/node_pool
  depends-on-aws-auth = null
}
45 |
--------------------------------------------------------------------------------
/aws/cluster/node-pool/variables.tf:
--------------------------------------------------------------------------------
# Per workspace node pool configuration. All attributes are optional();
# unset attributes resolve to null and are given fallbacks or inherited
# from the cluster's default node group in configuration.tf.
variable "configuration" {
  type = map(object({

    name = optional(string)

    # comma separated lists are passed as strings and split in locals
    instance_types      = optional(string)
    ami_release_version = optional(string)
    desired_capacity    = optional(string)
    min_size            = optional(string)
    max_size            = optional(string)
    disk_size           = optional(string)

    ami_type = optional(string)

    # EC2 instance metadata service (IMDS) settings
    metadata_options = optional(object({
      http_endpoint               = optional(string)
      http_tokens                 = optional(string)
      http_put_response_hop_limit = optional(number)
      http_protocol_ipv6          = optional(string)
      instance_metadata_tags      = optional(string)
    }))

    availability_zones = optional(string)

    vpc_subnet_ids = optional(string)

    vpc_secondary_cidr = optional(string)

    # when vpc_subnet_newbits is set the pool manages its own subnets
    vpc_subnet_newbits       = optional(string)
    vpc_subnet_number_offset = optional(string)
    vpc_subnet_map_public_ip = optional(bool)

    taints = optional(set(object({
      key    = string
      value  = string
      effect = string
    })))

    tags = optional(map(string))

    labels = optional(map(string))
  }))
  description = "Map with per workspace module configuration."
}

variable "configuration_base_key" {
  type        = string
  description = "The key in the configuration map all other keys inherit from."
  default     = "apps"
}

variable "cluster_name" {
  type        = string
  description = "Name of the cluster to attach the node pool to."
}
variable "cluster_default_node_pool_name" {
  type        = string
  description = "Name of the cluster's default node pool to inherit IAM role and subnets from."
  default     = "default"
}
61 |
--------------------------------------------------------------------------------
/aws/cluster/node-pool/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the node-pool module. Terraform 1.3.0 is
# the floor because the configuration variable relies on optional()
# object attributes.
terraform {
  required_version = ">= 1.3.0"

  required_providers {
    aws = {
      source = "hashicorp/aws"
    }
  }
}
10 |
--------------------------------------------------------------------------------
/aws/cluster/node-pool/vpc.tf:
--------------------------------------------------------------------------------
# Optional secondary CIDR block associated with the cluster VPC.
resource "aws_vpc_ipv4_cidr_block_association" "current" {
  count = local.vpc_secondary_cidr == null ? 0 : 1

  vpc_id     = data.aws_vpc.current.id
  cidr_block = local.vpc_secondary_cidr
}

locals {
  # carve pool subnets out of the secondary CIDR when configured,
  # otherwise out of the VPC's primary CIDR
  subnet_cidr = length(aws_vpc_ipv4_cidr_block_association.current) == 1 ? aws_vpc_ipv4_cidr_block_association.current[0].cidr_block : data.aws_vpc.current.cidr_block
}

# One pool-managed subnet per availability zone, only created when
# vpc_subnet_newbits is set.
resource "aws_subnet" "current" {
  count = local.vpc_subnet_newbits == null ? 0 : length(local.availability_zones)

  availability_zone = local.availability_zones[count.index]
  cidr_block = cidrsubnet(
    local.subnet_cidr,
    local.vpc_subnet_newbits,
    local.vpc_subnet_number_offset + count.index
  )
  vpc_id = data.aws_vpc.current.id
  # subnets default to public unless explicitly disabled
  map_public_ip_on_launch = local.vpc_subnet_map_public_ip == null ? true : local.vpc_subnet_map_public_ip

  tags = data.aws_eks_node_group.default.tags
}

resource "aws_route_table" "current" {
  count = local.vpc_subnet_newbits == null ? 0 : length(local.availability_zones)

  vpc_id = data.aws_vpc.current.id
}

# Default route per subnet: internet gateway for public subnets,
# per-AZ NAT gateway for private ones (gateways looked up in
# data_sources.tf).
resource "aws_route" "current" {
  count = local.vpc_subnet_newbits == null ? 0 : length(local.availability_zones)

  route_table_id = aws_route_table.current[count.index].id

  gateway_id             = local.vpc_subnet_map_public_ip == false ? null : data.aws_internet_gateway.current[0].id
  nat_gateway_id         = local.vpc_subnet_map_public_ip == false ? data.aws_nat_gateway.current[count.index].id : null
  destination_cidr_block = "0.0.0.0/0"
}

resource "aws_route_table_association" "current" {
  count = local.vpc_subnet_newbits == null ? 0 : length(local.availability_zones)

  subnet_id      = aws_subnet.current[count.index].id
  route_table_id = aws_route_table.current[count.index].id
}
49 |
--------------------------------------------------------------------------------
/aws/cluster/outputs.tf:
--------------------------------------------------------------------------------
output "current_config" {
  description = "Merged cluster configuration of the current Terraform workspace."
  value       = module.configuration.merged[terraform.workspace]
}

output "current_metadata" {
  description = "Cluster metadata (name, fqdn, labels) of the current workspace."
  value       = module.cluster_metadata
}

output "kubeconfig" {
  description = "Kubeconfig to connect to the EKS cluster."
  # kubeconfig holds cluster connection credentials; mark it sensitive
  # to keep it out of CLI and CI logs, consistent with the AKS module
  sensitive = true
  value     = module.cluster.kubeconfig
}
12 |
--------------------------------------------------------------------------------
/aws/cluster/variables.tf:
--------------------------------------------------------------------------------
# Per workspace cluster configuration. Values are strings only and are
# parsed into typed locals in configuration.tf.
variable "configuration" {
  type        = map(map(string))
  description = "Map with per workspace cluster configuration."
}

variable "configuration_base_key" {
  type        = string
  description = "The key in the configuration map all other keys inherit from."
  default     = "apps"
}
11 |
--------------------------------------------------------------------------------
/aws/cluster/versions.tf:
--------------------------------------------------------------------------------
# Provider requirements for the aws cluster module; provider
# configurations are inherited from the calling root module.
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
    }

    kubernetes = {
      source = "hashicorp/kubernetes"
    }
  }

  required_version = ">= 0.13"
}
14 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/ingress.tf:
--------------------------------------------------------------------------------
# Default ingress: a static public IP plus a DNS zone with apex and
# wildcard A records pointing at it. All four resources are skipped
# when the default ingress is disabled.
resource "azurerm_public_ip" "current" {
  count = var.disable_default_ingress ? 0 : 1

  # created in the AKS managed node resource group so the cluster's
  # load balancer can bind it
  name                = var.metadata_name
  location            = azurerm_kubernetes_cluster.current.location
  resource_group_name = azurerm_kubernetes_cluster.current.node_resource_group
  allocation_method   = "Static"
  sku                 = "Standard"

  zones = var.default_ingress_ip_zones

  tags = var.metadata_labels

  depends_on = [azurerm_kubernetes_cluster.current]
}

resource "azurerm_dns_zone" "current" {
  count = var.disable_default_ingress ? 0 : 1

  name                = var.metadata_fqdn
  resource_group_name = data.azurerm_resource_group.current.name

  tags = var.metadata_labels
}

# apex record ("@") for the zone itself
resource "azurerm_dns_a_record" "host" {
  count = var.disable_default_ingress ? 0 : 1

  name                = "@"
  zone_name           = azurerm_dns_zone.current[0].name
  resource_group_name = data.azurerm_resource_group.current.name
  ttl                 = 300
  records             = [azurerm_public_ip.current[0].ip_address]

  tags = var.metadata_labels
}

# wildcard record for all hostnames below the zone
resource "azurerm_dns_a_record" "wildcard" {
  count = var.disable_default_ingress ? 0 : 1

  name                = "*"
  zone_name           = azurerm_dns_zone.current[0].name
  resource_group_name = data.azurerm_resource_group.current.name
  ttl                 = 300
  records             = [azurerm_public_ip.current[0].ip_address]

  tags = var.metadata_labels
}
49 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/kubeconfig.tf:
--------------------------------------------------------------------------------
# Renders a kubeconfig for the AKS cluster from the client certificate
# credentials exposed by the cluster resource's kube_config attribute.
locals {
  template_vars = {
    cluster_name     = azurerm_kubernetes_cluster.current.name
    cluster_endpoint = azurerm_kubernetes_cluster.current.kube_config[0].host
    cluster_ca       = azurerm_kubernetes_cluster.current.kube_config[0].cluster_ca_certificate
    client_cert      = azurerm_kubernetes_cluster.current.kube_config[0].client_certificate
    client_key       = azurerm_kubernetes_cluster.current.kube_config[0].client_key
    path_cwd         = path.cwd
  }

  kubeconfig = templatefile("${path.module}/templates/kubeconfig.tpl", local.template_vars)
}
13 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/log_analytics.tf:
--------------------------------------------------------------------------------
# Optional Log Analytics workspace plus the ContainerInsights solution
# linked to it; both only exist when log analytics is enabled.
resource "azurerm_log_analytics_workspace" "current" {
  count = var.enable_log_analytics ? 1 : 0

  name                = var.metadata_name
  resource_group_name = data.azurerm_resource_group.current.name
  location            = data.azurerm_resource_group.current.location
  sku                 = "PerGB2018"

  tags = var.metadata_labels
}

resource "azurerm_log_analytics_solution" "current" {
  count = var.enable_log_analytics ? 1 : 0

  solution_name       = "ContainerInsights"
  resource_group_name = data.azurerm_resource_group.current.name
  location            = data.azurerm_resource_group.current.location

  workspace_resource_id = azurerm_log_analytics_workspace.current[0].id
  workspace_name        = azurerm_log_analytics_workspace.current[0].name

  plan {
    publisher = "Microsoft"
    product   = "OMSGallery/ContainerInsights"
  }
}
24 |
25 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/main.tf:
--------------------------------------------------------------------------------
data "azurerm_resource_group" "current" {
  name = var.resource_group
}

# AKS cluster. Networking, identity, logging and autoscaler features
# are toggled through variables; see variables.tf for defaults.
resource "azurerm_kubernetes_cluster" "current" {
  name                      = var.metadata_name
  location                  = data.azurerm_resource_group.current.location
  resource_group_name       = data.azurerm_resource_group.current.name
  dns_prefix                = var.dns_prefix
  sku_tier                  = var.sku_tier
  kubernetes_version        = var.kubernetes_version
  automatic_upgrade_channel = var.automatic_channel_upgrade

  role_based_access_control_enabled = true

  default_node_pool {
    name = var.default_node_pool_name
    type = var.default_node_pool_type

    auto_scaling_enabled = var.default_node_pool_enable_auto_scaling

    # set min and max count only if autoscaling is _enabled_
    min_count = var.default_node_pool_enable_auto_scaling ? var.default_node_pool_min_count : null
    max_count = var.default_node_pool_enable_auto_scaling ? var.default_node_pool_max_count : null

    # set node count only if auto scaling is _disabled_
    node_count = var.default_node_pool_enable_auto_scaling ? null : var.default_node_pool_node_count

    vm_size         = var.default_node_pool_vm_size
    os_disk_size_gb = var.default_node_pool_os_disk_size_gb

    # nodes only get a dedicated subnet (vnet.tf) with the azure CNI plugin
    vnet_subnet_id = var.network_plugin == "azure" ? azurerm_subnet.current[0].id : null
    max_pods       = var.max_pods

    only_critical_addons_enabled = var.default_node_pool_only_critical_addons

    zones = var.availability_zones

    # (variable names keep the historical "upgade" spelling to stay
    # backward compatible with existing configurations)
    upgrade_settings {
      max_surge                     = var.upgade_settings_max_surge
      drain_timeout_in_minutes      = var.upgade_settings_drain_timeout_in_minutes
      node_soak_duration_in_minutes = var.upgade_settings_node_soak_duration_in_minutes
    }
  }

  network_profile {
    network_plugin = var.network_plugin
    network_policy = var.network_policy

    service_cidr   = var.service_cidr
    dns_service_ip = var.dns_service_ip
    # pod_cidr is only set for kubenet, null with the azure CNI plugin
    pod_cidr = var.network_plugin == "azure" ? null : var.pod_cidr
  }

  # managed identity (default) ...
  dynamic "identity" {
    for_each = var.disable_managed_identities == true ? toset([]) : toset([1])

    content {
      type = var.user_assigned_identity_id == null ? "SystemAssigned" : "UserAssigned"

      identity_ids = var.user_assigned_identity_id == null ? null : [var.user_assigned_identity_id]
    }
  }

  # ... or the legacy service principal from service_principal.tf
  dynamic "service_principal" {
    for_each = var.disable_managed_identities == true ? toset([1]) : toset([])

    content {
      # client_id replaces the application_id attribute deprecated by
      # the azuread provider; keeps this in sync with the attribute
      # already used in service_principal.tf
      client_id     = azuread_application.current[0].client_id
      client_secret = azuread_service_principal_password.current[0].value
    }
  }

  azure_policy_enabled = var.enable_azure_policy_agent

  dynamic "oms_agent" {
    for_each = var.enable_log_analytics ? toset([1]) : toset([])

    content {
      log_analytics_workspace_id = var.enable_log_analytics ? azurerm_log_analytics_workspace.current[0].id : null
    }
  }

  dynamic "workload_autoscaler_profile" {
    for_each = var.keda_enabled || var.vertical_pod_autoscaler_enabled ? toset([1]) : toset([])

    content {
      keda_enabled                    = var.keda_enabled
      vertical_pod_autoscaler_enabled = var.vertical_pod_autoscaler_enabled
    }
  }

  tags = var.metadata_labels
}
95 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/node_pool/main.tf:
--------------------------------------------------------------------------------
# Cluster this node pool attaches to.
data "azurerm_kubernetes_cluster" "current" {
  name                = var.cluster_name
  resource_group_name = var.resource_group
}

locals {
  # subnet ids of the existing agent pools, with empty strings removed
  vnet_subnets = compact(data.azurerm_kubernetes_cluster.current.agent_pool_profile[*].vnet_subnet_id)
}

# Additional node pool attached to an existing AKS cluster.
resource "azurerm_kubernetes_cluster_node_pool" "current" {
  name                  = var.node_pool_name
  kubernetes_cluster_id = data.azurerm_kubernetes_cluster.current.id
  auto_scaling_enabled  = var.enable_auto_scaling
  max_count             = var.max_count
  min_count             = var.min_count
  node_count            = var.node_count
  vm_size               = var.vm_size
  node_labels           = var.node_labels
  node_taints           = var.node_taints
  zones                 = var.availability_zones
  max_pods              = var.max_pods
  os_disk_type          = var.os_disk_type
  os_disk_size_gb       = var.os_disk_size_gb
  priority              = var.priority
  eviction_policy       = var.eviction_policy
  spot_max_price        = var.max_spot_price

  # The data source returned agent_pool_profiles in some configurations contain
  # empty strings in vnet_subnet_id. In that case we rely on the defaults
  vnet_subnet_id = length(local.vnet_subnets) == 0 ? null : coalesce(tolist(local.vnet_subnets)...)

  # When autoscaling acts, the node_count gets changed, but it should not be
  # forced to match the config
  lifecycle {
    ignore_changes = [node_count]
  }
}
38 |
39 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/node_pool/variables.tf:
--------------------------------------------------------------------------------
# Inputs for the AKS node pool module.
# NOTE(review): several variables keep their type commented out
# (`# type = optional(...)`) — presumably written before optional()
# types were supported; values are passed through to the resource
# unvalidated. Confirm before tightening.

variable "cluster_name" {
  type        = string
  description = "Name of the cluster"
}

variable "resource_group" {
  type        = string
  description = "Resource group of the cluster."
}

variable "node_pool_name" {
  type        = string
  description = "Name of the node pool."
}

variable "enable_auto_scaling" {
  type        = bool
  description = "Whether to enable auto scaling for the node pool. Defaults to true"
}

variable "max_count" {
  # type = optional(string)
  description = "Max number of nodes in the pool"
}

variable "min_count" {
  # type = optional(string)
  description = "Min number of nodes in the pool"
}

variable "node_count" {
  type        = string
  description = "Static number of nodes in the pool"
}

variable "max_pods" {
  type        = string
  description = "Maximum number of pods per node"
}

variable "vm_size" {
  type        = string
  description = "VM size to use for the nodes in the pool"
}

variable "os_disk_type" {
  type        = string
  description = "The type of disk which should be used for the Operating System. Possible values are Ephemeral and Managed. Defaults to Managed. Changing this forces a new resource to be created."
}

variable "os_disk_size_gb" {
  # type = optional(string)
  description = "The Agent Operating System disk size in GB. Changing this forces a new resource to be created."
}

variable "eviction_policy" {
  # type = optional(string)
  description = "Eviction policy for when using spot instances. Possible values are 'Deallocate' and 'Delete'."
}

variable "priority" {
  type        = string
  description = "Whether to use spot instances. Possible values are 'Spot' and 'Regular'"
}

variable "max_spot_price" {
  # type = optional(string)
  description = "The maximum desired spot price, or -1 for the current on-demand price"
}

variable "node_labels" {
  # type = optional(map(string))
  description = "The labels that should be added to the nodes"
}

variable "node_taints" {
  # type = optional(list(string))
  description = "The node taints that should be automatically applied"
}

variable "availability_zones" {
  # type = optional(list(string))
  description = "The list of availability zones to create the node pool in"
}
85 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/node_pool/versions.tf:
--------------------------------------------------------------------------------
# Minimum Terraform version for the AKS node pool module.
terraform {
  required_version = ">= 0.14"
}
4 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/outputs.tf:
--------------------------------------------------------------------------------
output "aks_vnet" {
  description = "Virtual network created for the cluster, null unless the azure network plugin is used."
  value       = length(azurerm_virtual_network.current) > 0 ? azurerm_virtual_network.current[0] : null
}

output "kubeconfig" {
  description = "Kubeconfig to connect to the AKS cluster."
  sensitive   = true
  value       = local.kubeconfig
}

output "default_ingress_ip" {
  description = "Static public IP of the default ingress, null when the default ingress is disabled."
  value       = length(azurerm_public_ip.current) > 0 ? azurerm_public_ip.current[0].ip_address : null
}
13 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/provider.tf:
--------------------------------------------------------------------------------
provider "azurerm" {
  features {}
}

# Kubernetes provider authenticated with the AKS client certificate
# credentials, for resources this module creates inside the cluster.
provider "kubernetes" {
  alias = "aks"

  host = azurerm_kubernetes_cluster.current.kube_config[0].host
  client_certificate = base64decode(
    azurerm_kubernetes_cluster.current.kube_config[0].client_certificate,
  )
  client_key = base64decode(azurerm_kubernetes_cluster.current.kube_config[0].client_key)
  cluster_ca_certificate = base64decode(
    azurerm_kubernetes_cluster.current.kube_config[0].cluster_ca_certificate,
  )
}
17 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/service_principal.tf:
--------------------------------------------------------------------------------
# Legacy service principal identity for the cluster; only created when
# managed identities are disabled (consumed by the service_principal
# block in main.tf).
resource "azuread_application" "current" {
  count = var.disable_managed_identities == true ? 1 : 0

  display_name = var.metadata_name
}

resource "azuread_service_principal" "current" {
  count = var.disable_managed_identities == true ? 1 : 0

  # NOTE(review): the client_id attribute requires a newer azuread
  # provider release than the ">= 1.3.0" floor in versions.tf — verify
  # the constraint
  client_id = azuread_application.current[0].client_id
}

resource "azuread_service_principal_password" "current" {
  count = var.disable_managed_identities == true ? 1 : 0

  service_principal_id = azuread_service_principal.current[0].id
}
18 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/templates/kubeconfig.tpl:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | clusters:
3 | - cluster:
4 | server: ${cluster_endpoint}
5 | certificate-authority-data: ${cluster_ca}
6 | name: ${cluster_name}
7 | contexts:
8 | - context:
9 | cluster: ${cluster_name}
10 | user: ${cluster_name}
11 | name: ${cluster_name}
12 | current-context: ${cluster_name}
13 | kind: Config
14 | preferences: {}
15 | users:
16 | - name: ${cluster_name}
17 | user:
18 | client-certificate-data: ${client_cert}
19 | client-key-data: ${client_key}
20 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/versions.tf:
--------------------------------------------------------------------------------
1 |
# Provider requirements for the AKS module.
terraform {
  required_providers {
    azurerm = {
      # https://registry.terraform.io/providers/hashicorp/azurerm/latest
      source  = "hashicorp/azurerm"
      version = ">= 3.4.0"
    }

    azuread = {
      # https://registry.terraform.io/providers/hashicorp/azuread/latest
      # NOTE(review): service_principal.tf uses azuread_application's
      # client_id attribute, which needs a newer azuread release than
      # this floor suggests — verify and consider raising it
      source  = "hashicorp/azuread"
      version = ">= 1.3.0"
    }
  }

  required_version = ">= 0.13"
}
19 |
--------------------------------------------------------------------------------
/azurerm/_modules/aks/vnet.tf:
--------------------------------------------------------------------------------
1 |
# VNet and node subnet for the cluster; only created when the azure
# CNI network plugin is used.
resource "azurerm_virtual_network" "current" {
  count = var.network_plugin == "azure" ? 1 : 0

  # legacy naming kept for clusters created before metadata based names
  name                = var.legacy_vnet_name ? "vnet-aks-${terraform.workspace}-cluster" : var.metadata_name
  address_space       = var.vnet_address_space
  resource_group_name = data.azurerm_resource_group.current.name
  location            = data.azurerm_resource_group.current.location
}

resource "azurerm_subnet" "current" {
  count = var.network_plugin == "azure" ? 1 : 0

  name                 = var.legacy_vnet_name ? "aks-node-subnet" : "${var.metadata_name}-${var.default_node_pool_name}-node-pool"
  address_prefixes     = var.subnet_address_prefixes
  resource_group_name  = data.azurerm_resource_group.current.name
  virtual_network_name = azurerm_virtual_network.current[0].name

  # an empty list is passed as null — presumably to avoid a perpetual
  # diff on the attribute; confirm
  service_endpoints = length(var.subnet_service_endpoints) > 0 ? var.subnet_service_endpoints : null
}
21 |
--------------------------------------------------------------------------------
/azurerm/cluster-local/configuration.tf:
--------------------------------------------------------------------------------
1 | module "configuration" {
2 |   source = "../../common/configuration"
3 |
4 |   configuration = var.configuration
5 |   base_key = var.configuration_base_key
6 | }
7 |
8 | locals {
9 |   # current workspace config
10 |   cfg = module.configuration.merged[terraform.workspace]
11 |
12 |   name_prefix = local.cfg["name_prefix"]
13 |
14 |   base_domain = local.cfg["base_domain"]
15 |
16 |   # on Azure the region is determined by resource group
17 |   # in the local implementation we don't have access to that
18 |   # to still support multi-region setups locally, we hash the resource group name
19 |   # and use that as the region part of the cluster name prefixed with aks-
20 |   resource_group = local.cfg["resource_group"]
21 |   fake_region_hash = substr(sha256(local.resource_group), 0, 7)
22 |   fake_region = "aks-${local.fake_region_hash}"
23 |
24 |   http_port_default = terraform.workspace == "apps" ? 80 : 8080
25 |   http_port = lookup(local.cfg, "http_port", local.http_port_default)
26 |
27 |   https_port_default = terraform.workspace == "apps" ? 443 : 8443
28 |   https_port = lookup(local.cfg, "https_port", local.https_port_default)
29 |
30 |   disable_default_ingress = lookup(local.cfg, "disable_default_ingress", false)
31 |
32 |   node_image = lookup(local.cfg, "node_image", null)
33 |
34 |   # one local "worker" node per node the real node pool would run,
35 |   # passed to the kind module as a comma separated list;
36 |   # a single-symbol for expression replaces the previous
37 |   # `for node, _ in range(...)` which bound the index to an unused,
38 |   # misleadingly named symbol
39 |   node_count = lookup(local.cfg, "default_node_pool_min_count", "1")
40 |   nodes = [
41 |     for _ in range(local.node_count) :
42 |     "worker"
43 |   ]
44 |   extra_nodes = join(",", local.nodes)
45 |
46 | }
42 |
--------------------------------------------------------------------------------
/azurerm/cluster-local/main.tf:
--------------------------------------------------------------------------------
1 | # shared metadata module keeps local cluster names, domains and
2 | # labels consistent with the real AKS cluster implementation
3 | module "cluster_metadata" {
4 |   source = "../../common/metadata"
5 |
6 |   name_prefix = local.name_prefix
7 |   base_domain = local.base_domain
8 |
9 |   provider_name = "azure"
10 |   provider_region = local.fake_region
11 |
12 |   # Azure does not allow / character in labels
13 |   label_namespace = "kubestack.com-"
14 | }
15 |
16 | # local development stand-in: runs a kind cluster instead of AKS
17 | module "cluster" {
18 |   source = "../../kind/_modules/kind"
19 |
20 |   metadata_name = module.cluster_metadata.name
21 |   metadata_fqdn = module.cluster_metadata.fqdn
22 |   metadata_tags = module.cluster_metadata.tags
23 |   metadata_labels = module.cluster_metadata.labels
24 |
25 |   node_image = local.node_image
26 |   extra_nodes = local.extra_nodes
27 |
28 |   http_port = local.http_port
29 |   https_port = local.https_port
30 |
31 |   disable_default_ingress = local.disable_default_ingress
32 | }
30 |
--------------------------------------------------------------------------------
/azurerm/cluster-local/outputs.tf:
--------------------------------------------------------------------------------
1 | # merged configuration of the currently selected workspace
2 | output "current_config" {
3 |   value = module.configuration.merged[terraform.workspace]
4 | }
5 |
6 | output "current_metadata" {
7 |   value = module.cluster_metadata
8 | }
9 |
10 | # contains cluster credentials, hence marked sensitive
11 | output "kubeconfig" {
12 |   sensitive = true
13 |   value = module.cluster.kubeconfig
14 | }
15 |
16 | output "default_ingress_ip" {
17 |   # the cluster module returns an IP as a string
18 |   # we YAML encode null for cluster-local to provide
19 |   # a unified output to consumers
20 |   value = yamlencode(null)
21 | }
20 |
--------------------------------------------------------------------------------
/azurerm/cluster-local/variables.tf:
--------------------------------------------------------------------------------
1 | variable "configuration" {
2 |   type = map(map(string))
3 |   description = "Map with per workspace cluster configuration."
4 | }
5 |
6 | # environments other than the base_key inherit any value they do not
7 | # set themselves from the base_key environment, see common/configuration
8 | variable "configuration_base_key" {
9 |   type = string
10 |   description = "The key in the configuration map all other keys inherit from."
11 |   default = "apps"
12 | }
11 |
--------------------------------------------------------------------------------
/azurerm/cluster-local/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | # minimum Terraform CLI version for the cluster-local root module
3 | terraform {
4 |   required_version = ">= 0.13"
5 | }
5 |
--------------------------------------------------------------------------------
/azurerm/cluster/configuration.tf:
--------------------------------------------------------------------------------
1 | module "configuration" {
2 |   source = "../../common/configuration"
3 |
4 |   configuration = var.configuration
5 |   base_key = var.configuration_base_key
6 | }
7 |
8 | locals {
9 |   # current workspace config
10 |   cfg = module.configuration.merged[terraform.workspace]
11 |
12 |   name_prefix = local.cfg["name_prefix"]
13 |
14 |   base_domain = local.cfg["base_domain"]
15 |
16 |   resource_group = local.cfg["resource_group"]
17 |
18 |   dns_prefix = lookup(local.cfg, "dns_prefix", "api")
19 |
20 |   sku_tier = lookup(local.cfg, "sku_tier", "Free")
21 |
22 |   # networking; comma separated strings become lists
23 |   legacy_vnet_name = lookup(local.cfg, "legacy_vnet_name", false)
24 |   vnet_address_space = split(",", lookup(local.cfg, "vnet_address_space", "10.0.0.0/8"))
25 |   subnet_address_prefixes = split(",", lookup(local.cfg, "subnet_address_prefixes", "10.1.0.0/16"))
26 |
27 |   subnet_service_endpoints_lookup = lookup(local.cfg, "subnet_service_endpoints", "")
28 |   subnet_service_endpoints = local.subnet_service_endpoints_lookup != "" ? split(",", local.subnet_service_endpoints_lookup) : []
29 |
30 |   network_plugin = lookup(local.cfg, "network_plugin", "kubenet")
31 |   network_policy = lookup(local.cfg, "network_policy", "calico")
32 |   service_cidr = lookup(local.cfg, "service_cidr", "10.0.0.0/16")
33 |   dns_service_ip = lookup(local.cfg, "dns_service_ip", "10.0.0.10")
34 |   pod_cidr = lookup(local.cfg, "pod_cidr", "10.244.0.0/16")
35 |   max_pods = lookup(local.cfg, "max_pods", null)
36 |
37 |   # default node pool
38 |   default_node_pool_name = lookup(local.cfg, "default_node_pool_name", "default")
39 |   default_node_pool_type = lookup(local.cfg, "default_node_pool_type", "VirtualMachineScaleSets")
40 |
41 |   default_node_pool_enable_auto_scaling = lookup(local.cfg, "default_node_pool_enable_auto_scaling", true)
42 |   default_node_pool_min_count = lookup(local.cfg, "default_node_pool_min_count", "1")
43 |   default_node_pool_max_count = lookup(local.cfg, "default_node_pool_max_count", "1")
44 |   default_node_pool_node_count = lookup(local.cfg, "default_node_pool_node_count", "1")
45 |
46 |   default_node_pool_vm_size = lookup(local.cfg, "default_node_pool_vm_size", "Standard_B2s")
47 |   default_node_pool_only_critical_addons = lookup(local.cfg, "default_node_pool_only_critical_addons", false)
48 |   default_node_pool_os_disk_size_gb = lookup(local.cfg, "default_node_pool_os_disk_size_gb", "30")
49 |
50 |   # ingress
51 |   disable_default_ingress = lookup(local.cfg, "disable_default_ingress", false)
52 |
53 |   default_ingress_ip_zones_lookup = lookup(local.cfg, "default_ingress_ip_zones", "")
54 |   default_ingress_ip_zones = local.default_ingress_ip_zones_lookup != "" ? split(",", local.default_ingress_ip_zones_lookup) : []
55 |
56 |   enable_azure_policy_agent = lookup(local.cfg, "enable_azure_policy_agent", false)
57 |
58 |   disable_managed_identities = lookup(local.cfg, "disable_managed_identities", false)
59 |   user_assigned_identity_id = lookup(local.cfg, "user_assigned_identity_id", null)
60 |
61 |   enable_log_analytics = lookup(local.cfg, "enable_log_analytics", true)
62 |
63 |   kubernetes_version = lookup(local.cfg, "kubernetes_version", null)
64 |   automatic_channel_upgrade = lookup(local.cfg, "automatic_channel_upgrade", null)
65 |
66 |   availability_zones_lookup = lookup(local.cfg, "availability_zones", "")
67 |   availability_zones = local.availability_zones_lookup != "" ? split(",", local.availability_zones_lookup) : []
68 |
69 |   # "k1=v1,k2=v2" string to map; entries without exactly one = are dropped
70 |   additional_metadata_labels_lookup = lookup(local.cfg, "additional_metadata_labels", "")
71 |   additional_metadata_labels_tuples = [for t in split(",", local.additional_metadata_labels_lookup) : split("=", t)]
72 |   additional_metadata_labels = { for t in local.additional_metadata_labels_tuples : t[0] => t[1] if length(t) == 2 }
73 |
74 |   keda_enabled = lookup(local.cfg, "keda_enabled", false)
75 |   vertical_pod_autoscaler_enabled = lookup(local.cfg, "vertical_pod_autoscaler_enabled", false)
76 |
77 |   # the "upgade" spelling is a historic typo; accept the correctly
78 |   # spelled config keys first and fall back to the legacy typo keys
79 |   # for backwards compatibility (the local names keep the legacy
80 |   # spelling because main.tf and the module variables reference them)
81 |   upgade_settings_max_surge = lookup(local.cfg, "upgrade_settings_max_surge", lookup(local.cfg, "upgade_settings_max_surge", "10%"))
82 |   upgade_settings_drain_timeout_in_minutes = lookup(local.cfg, "upgrade_settings_drain_timeout_in_minutes", lookup(local.cfg, "upgade_settings_drain_timeout_in_minutes", 0))
83 |   upgade_settings_node_soak_duration_in_minutes = lookup(local.cfg, "upgrade_settings_node_soak_duration_in_minutes", lookup(local.cfg, "upgade_settings_node_soak_duration_in_minutes", 0))
84 | }
77 |
--------------------------------------------------------------------------------
/azurerm/cluster/main.tf:
--------------------------------------------------------------------------------
1 | # the resource group also determines the Azure location of the cluster
2 | data "azurerm_resource_group" "current" {
3 |   name = local.resource_group
4 | }
5 |
6 | module "cluster_metadata" {
7 |   source = "../../common/metadata"
8 |
9 |   name_prefix = local.name_prefix
10 |   base_domain = local.base_domain
11 |
12 |   provider_name = "azure"
13 |   provider_region = data.azurerm_resource_group.current.location
14 |
15 |   # Azure does not allow / character in labels
16 |   label_namespace = "kubestack.com-"
17 | }
18 |
19 | # wires the per-workspace configuration (see configuration.tf) into
20 | # the reusable AKS module
21 | module "cluster" {
22 |   source = "../_modules/aks"
23 |
24 |   resource_group = local.resource_group
25 |
26 |   metadata_name = module.cluster_metadata.name
27 |   metadata_fqdn = module.cluster_metadata.fqdn
28 |   metadata_labels = merge(module.cluster_metadata.labels, local.additional_metadata_labels)
29 |   metadata_label_namespace = module.cluster_metadata.label_namespace
30 |
31 |   dns_prefix = local.dns_prefix
32 |
33 |   sku_tier = local.sku_tier
34 |
35 |   legacy_vnet_name = local.legacy_vnet_name
36 |   vnet_address_space = local.vnet_address_space
37 |   subnet_address_prefixes = local.subnet_address_prefixes
38 |   subnet_service_endpoints = local.subnet_service_endpoints
39 |
40 |   network_plugin = local.network_plugin
41 |   network_policy = local.network_policy
42 |   service_cidr = local.service_cidr
43 |   dns_service_ip = local.dns_service_ip
44 |   pod_cidr = local.pod_cidr
45 |   max_pods = local.max_pods
46 |
47 |   default_node_pool_name = local.default_node_pool_name
48 |   default_node_pool_type = local.default_node_pool_type
49 |
50 |   default_node_pool_enable_auto_scaling = local.default_node_pool_enable_auto_scaling
51 |   default_node_pool_min_count = local.default_node_pool_min_count
52 |   default_node_pool_max_count = local.default_node_pool_max_count
53 |   default_node_pool_node_count = local.default_node_pool_node_count
54 |
55 |   default_node_pool_only_critical_addons = local.default_node_pool_only_critical_addons
56 |   default_node_pool_vm_size = local.default_node_pool_vm_size
57 |   default_node_pool_os_disk_size_gb = local.default_node_pool_os_disk_size_gb
58 |
59 |   disable_default_ingress = local.disable_default_ingress
60 |   default_ingress_ip_zones = local.default_ingress_ip_zones
61 |
62 |   enable_azure_policy_agent = local.enable_azure_policy_agent
63 |
64 |   disable_managed_identities = local.disable_managed_identities
65 |   user_assigned_identity_id = local.user_assigned_identity_id
66 |
67 |   kubernetes_version = local.kubernetes_version
68 |   automatic_channel_upgrade = local.automatic_channel_upgrade
69 |   enable_log_analytics = local.enable_log_analytics
70 |
71 |   availability_zones = local.availability_zones
72 |
73 |   keda_enabled = local.keda_enabled
74 |   vertical_pod_autoscaler_enabled = local.vertical_pod_autoscaler_enabled
75 |
76 |   # NOTE: "upgade" is a historic typo kept to match the module's
77 |   # variable names; renaming would be a breaking interface change
78 |   upgade_settings_max_surge = local.upgade_settings_max_surge
79 |   upgade_settings_drain_timeout_in_minutes = local.upgade_settings_drain_timeout_in_minutes
80 |   upgade_settings_node_soak_duration_in_minutes = local.upgade_settings_node_soak_duration_in_minutes
81 | }
77 |
--------------------------------------------------------------------------------
/azurerm/cluster/node-pool/configuration.tf:
--------------------------------------------------------------------------------
1 | module "configuration" {
2 |   source = "../../../common/configuration"
3 |
4 |   configuration = var.configuration
5 |   base_key = var.configuration_base_key
6 | }
7 |
8 | locals {
9 |   cfg = module.configuration.merged[terraform.workspace]
10 |
11 |   # object-typed configuration: unset optional attributes arrive as
12 |   # null, so defaults are applied with explicit null checks
13 |   node_pool_name = local.cfg["node_pool_name"]
14 |   vm_size = local.cfg["vm_size"] != null ? local.cfg["vm_size"] : "Standard_B2s"
15 |   max_pods = local.cfg["max_pods"] != null ? local.cfg["max_pods"] : "110"
16 |   os_disk_type = local.cfg["os_disk_type"] != null ? local.cfg["os_disk_type"] : "Managed"
17 |   os_disk_size_gb = local.cfg["os_disk_size_gb"]
18 |   availability_zones_list = local.cfg["availability_zones"] != null ? local.cfg["availability_zones"] : []
19 |   availability_zones = length(local.availability_zones_list) == 0 ? null : local.availability_zones_list
20 |
21 |   # min/max counts are only forwarded when autoscaling is enabled,
22 |   # otherwise they are passed as null
23 |   enable_auto_scaling = local.cfg["enable_auto_scaling"] != null ? local.cfg["enable_auto_scaling"] : true
24 |   max_count_string = local.cfg["max_count"] != null ? local.cfg["max_count"] : "1"
25 |   min_count_string = local.cfg["min_count"] != null ? local.cfg["min_count"] : "1"
26 |   max_count = local.enable_auto_scaling ? local.max_count_string : null
27 |   min_count = local.enable_auto_scaling ? local.min_count_string : null
28 |   node_count = local.cfg["node_count"] != null ? local.cfg["node_count"] : "1"
29 |
30 |   # spot settings: priority, eviction policy and max price are only
31 |   # forwarded when use_spot is set; "-1" means pay up to on-demand price
32 |   use_spot = local.cfg["use_spot"] != null ? local.cfg["use_spot"] : false
33 |   priority = local.use_spot ? "Spot" : "Regular"
34 |   eviction_policy = local.use_spot ? local.cfg["eviction_policy"] : null
35 |   max_spot_price_value = local.cfg["max_spot_price"] != null ? local.cfg["max_spot_price"] : "-1"
36 |   max_spot_price = local.use_spot ? local.max_spot_price_value : null
37 |
38 |   # spot pools additionally get the scalesetpriority taint and label
39 |   # merged with any user provided taints/labels
40 |   user_node_labels = local.cfg["node_labels"] != null ? local.cfg["node_labels"] : {}
41 |   user_node_taints = local.cfg["node_taints"] != null ? local.cfg["node_taints"] : []
42 |   spot_taints = [
43 |     "kubernetes.azure.com/scalesetpriority=spot:NoSchedule",
44 |   ]
45 |   spot_labels = { "kubernetes.azure.com/scalesetpriority" = "spot" }
46 |
47 |   # null instead of empty collections so the attributes stay unset
48 |   taints = compact(concat(local.use_spot ? local.spot_taints : [], local.user_node_taints))
49 |   labels = merge(local.use_spot ? local.spot_labels : {}, local.user_node_labels)
50 |   node_labels = length(local.labels) == 0 ? null : local.labels
51 |   node_taints = length(local.taints) == 0 ? null : local.taints
52 | }
44 |
--------------------------------------------------------------------------------
/azurerm/cluster/node-pool/main.tf:
--------------------------------------------------------------------------------
1 | # attaches an additional node pool to an existing AKS cluster,
2 | # all values come from the per-workspace configuration (configuration.tf)
3 | module "node_pool" {
4 |   source = "../../_modules/aks/node_pool"
5 |
6 |   cluster_name = var.cluster_name
7 |   resource_group = var.resource_group
8 |
9 |   node_pool_name = local.node_pool_name
10 |   enable_auto_scaling = local.enable_auto_scaling
11 |   max_count = local.max_count
12 |   min_count = local.min_count
13 |   node_count = local.node_count
14 |   vm_size = local.vm_size
15 |   eviction_policy = local.eviction_policy
16 |   priority = local.priority
17 |   max_spot_price = local.max_spot_price
18 |   node_labels = local.node_labels
19 |   node_taints = local.node_taints
20 |   availability_zones = local.availability_zones
21 |   os_disk_type = local.os_disk_type
22 |   os_disk_size_gb = local.os_disk_size_gb
23 |   max_pods = local.max_pods
24 | }
23 |
--------------------------------------------------------------------------------
/azurerm/cluster/node-pool/variables.tf:
--------------------------------------------------------------------------------
1 | variable "configuration" {
2 |   # numeric settings are typed as string to mirror the string based
3 |   # cluster configuration maps; Terraform converts where needed
4 |   type = map(object({
5 |     node_pool_name = optional(string)
6 |     vm_size = optional(string)
7 |     node_count = optional(string)
8 |
9 |     enable_auto_scaling = optional(bool)
10 |     max_count = optional(string)
11 |     min_count = optional(string)
12 |     eviction_policy = optional(string)
13 |
14 |     max_pods = optional(string)
15 |     os_disk_type = optional(string)
16 |     os_disk_size_gb = optional(string)
17 |
18 |     use_spot = optional(bool)
19 |     max_spot_price = optional(string)
20 |
21 |     node_labels = optional(map(string))
22 |     node_taints = optional(list(string))
23 |     availability_zones = optional(list(string))
24 |   }))
25 |   description = "Map with per workspace node pool configuration."
26 | }
27 |
28 | variable "configuration_base_key" {
29 |   type = string
30 |   description = "The key in the configuration map all other keys inherit from."
31 |   default = "apps"
32 | }
33 |
34 | variable "cluster_name" {
35 |   type = string
36 |   description = "The name of the cluster to attach the node pool to"
37 | }
38 |
39 | variable "resource_group" {
40 |   type = string
41 |   description = "The resource group of the cluster to attach the node pool to"
42 | }
41 |
--------------------------------------------------------------------------------
/azurerm/cluster/node-pool/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     # the local name must be "azurerm" so this entry applies to the
4 |     # provider the azurerm_* resources in this module resolve to; under
5 |     # the previous local name "azure" the entry was effectively ignored
6 |     azurerm = {
7 |       # https://registry.terraform.io/providers/hashicorp/azurerm/latest
8 |       source = "hashicorp/azurerm"
9 |     }
10 |   }
11 |
12 |   required_version = ">= 1.3.0"
13 | }
10 |
--------------------------------------------------------------------------------
/azurerm/cluster/outputs.tf:
--------------------------------------------------------------------------------
1 | output "aks_vnet" {
2 |   value = module.cluster.aks_vnet
3 | }
4 |
5 | # merged configuration of the currently selected workspace
6 | output "current_config" {
7 |   value = module.configuration.merged[terraform.workspace]
8 | }
9 |
10 | output "current_metadata" {
11 |   value = module.cluster_metadata
12 | }
13 |
14 | # contains cluster credentials, hence marked sensitive
15 | output "kubeconfig" {
16 |   sensitive = true
17 |   value = module.cluster.kubeconfig
18 | }
19 |
20 | output "default_ingress_ip" {
21 |   value = module.cluster.default_ingress_ip
22 | }
21 |
--------------------------------------------------------------------------------
/azurerm/cluster/variables.tf:
--------------------------------------------------------------------------------
1 | variable "configuration" {
2 |   type = map(map(string))
3 |   description = "Map with per workspace cluster configuration."
4 | }
5 |
6 | # environments other than the base_key inherit any value they do not
7 | # set themselves from the base_key environment, see common/configuration
8 | variable "configuration_base_key" {
9 |   type = string
10 |   description = "The key in the configuration map all other keys inherit from."
11 |   default = "apps"
12 | }
11 |
--------------------------------------------------------------------------------
/azurerm/cluster/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | # minimum Terraform CLI version for the cluster root module
3 | terraform {
4 |   required_version = ">= 0.13"
5 | }
5 |
--------------------------------------------------------------------------------
/common/configuration/outputs.tf:
--------------------------------------------------------------------------------
1 | locals {
2 |   # the environment all other environments inherit defaults from
3 |   base_config = var.configuration[var.base_key]
4 |
5 |   merged = {
6 |     for env_key, env in var.configuration :
7 |     env_key => {
8 |       # loop through all config keys in base_key environment and current env
9 |       # if current env has that key, use the value from current env
10 |       # if not, use the value from the base_key environment
11 |       # NOTE(review): a key explicitly set to null in an env also falls
12 |       # back to the base value; a key that only exists in an env with a
13 |       # null value would fail the base_config index — presumably envs
14 |       # only null keys the base defines, confirm against callers
15 |       for key in setunion(keys(env), keys(local.base_config)) :
16 |       key => lookup(env, key, null) != null ? env[key] : local.base_config[key]
17 |     }
18 |   }
19 | }
20 |
21 | output "merged" {
22 |   value = local.merged
23 | }
19 |
--------------------------------------------------------------------------------
/common/configuration/tests/custom_envs/test_envs.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     test = {
4 |       source = "terraform.io/builtin/test"
5 |     }
6 |   }
7 | }
8 |
9 | # exercises merge inheritance with custom (non-default) environment
10 | # names; apps_production is the base_key the others inherit from
11 | module "mut" {
12 |   source = "../.."
13 |
14 |   configuration = {
15 |     apps_production = {
16 |       apps_key1 = "from_apps_production"
17 |       apps_key2 = "from_apps_production"
18 |     }
19 |
20 |     apps_staging = {
21 |       apps_key1 = "from_apps_staging"
22 |       apps_key2 = "from_apps_staging"
23 |     }
24 |
25 |     ops = {
26 |       ops_key = "from_ops"
27 |       apps_key1 = "from_ops"
28 |     }
29 |
30 |     loc = {}
31 |   }
32 |
33 |   base_key = "apps_production"
34 | }
35 |
36 | resource "test_assertions" "apps_production" {
37 |   component = "apps_production"
38 |
39 |   equal "scheme" {
40 |     description = "apps_production is unchanged"
41 |     got = module.mut.merged["apps_production"]
42 |     want = {
43 |       apps_key1 = "from_apps_production"
44 |       apps_key2 = "from_apps_production"
45 |     }
46 |   }
47 | }
48 |
49 | resource "test_assertions" "apps_staging" {
50 |   component = "apps_staging"
51 |
52 |   equal "scheme" {
53 |     description = "apps_staging overwrites everything from apps_production"
54 |     got = module.mut.merged["apps_staging"]
55 |     want = {
56 |       apps_key1 = "from_apps_staging"
57 |       apps_key2 = "from_apps_staging"
58 |     }
59 |   }
60 | }
61 |
62 | resource "test_assertions" "ops" {
63 |   component = "ops"
64 |
65 |   equal "scheme" {
66 |     description = "ops inherits from apps, overwrites apps_key1 and adds ops_key"
67 |     got = module.mut.merged["ops"]
68 |     want = {
69 |       apps_key1 = "from_ops"
70 |       apps_key2 = "from_apps_production"
71 |       ops_key = "from_ops"
72 |     }
73 |   }
74 | }
73 |
74 | resource "test_assertions" "loc" {
75 |   component = "loc"
76 |
77 |   equal "scheme" {
78 |     # loc = {} in the configuration above, so it inherits every key
79 |     # unchanged from the apps_production base environment; the previous
80 |     # description wrongly claimed loc nulls apps_key2 and adds loc_key
81 |     description = "loc is empty and inherits everything from apps_production"
82 |     got = module.mut.merged["loc"]
83 |     want = {
84 |       apps_key1 = "from_apps_production"
85 |       apps_key2 = "from_apps_production"
86 |     }
87 |   }
88 | }
86 |
--------------------------------------------------------------------------------
/common/configuration/tests/default_envs/test_envs.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     test = {
4 |       source = "terraform.io/builtin/test"
5 |     }
6 |   }
7 | }
8 |
9 | # exercises merge inheritance with the default environment names
10 | # (apps/ops/loc); apps is the base_key the others inherit from
11 | module "mut" {
12 |   source = "../.."
13 |
14 |   configuration = {
15 |     apps = {
16 |       apps_key1 = "from_apps"
17 |       apps_key2 = "from_apps"
18 |     }
19 |
20 |     ops = {
21 |       ops_key = "from_ops"
22 |       apps_key1 = "from_ops"
23 |     }
24 |
25 |     loc = {
26 |       loc_key = "from_loc"
27 |     }
28 |   }
29 |
30 |   base_key = "apps"
31 | }
32 |
33 | resource "test_assertions" "apps" {
34 |   component = "apps"
35 |
36 |   equal "scheme" {
37 |     description = "apps is unchanged"
38 |     got = module.mut.merged["apps"]
39 |     want = {
40 |       apps_key1 = "from_apps"
41 |       apps_key2 = "from_apps"
42 |     }
43 |   }
44 | }
45 |
46 | resource "test_assertions" "ops" {
47 |   component = "ops"
48 |
49 |   equal "scheme" {
50 |     description = "ops inherits from apps, overwrites apps_key1 and adds ops_key"
51 |     got = module.mut.merged["ops"]
52 |     want = {
53 |       apps_key1 = "from_ops"
54 |       apps_key2 = "from_apps"
55 |       ops_key = "from_ops"
56 |     }
57 |   }
58 | }
59 |
60 | resource "test_assertions" "loc" {
61 |   component = "loc"
62 |
63 |   equal "scheme" {
64 |     description = "loc inherits from apps and adds loc_key"
65 |     got = module.mut.merged["loc"]
66 |     want = {
67 |       apps_key1 = "from_apps"
68 |       apps_key2 = "from_apps"
69 |       loc_key = "from_loc"
70 |     }
71 |   }
72 | }
71 |
--------------------------------------------------------------------------------
/common/configuration/tests/non_string_values/test_hash.tf:
--------------------------------------------------------------------------------
1 | # verifies that an env overrides an inherited map value wholesale,
2 | # i.e. ops replaces the whole map rather than deep-merging keys
3 | module "mut_hash" {
4 |   source = "../.."
5 |
6 |   configuration = {
7 |     apps = {
8 |       test_hash = {
9 |         "from_apps_1" = "from_apps_1"
10 |         "from_apps_2" = "from_apps_2"
11 |       }
12 |     }
13 |
14 |     ops = {
15 |       test_hash = {
16 |         "from_ops_1" = "from_ops_1"
17 |       }
18 |     }
19 |
20 |     loc = {
21 |     }
22 |   }
23 |
24 |   base_key = "apps"
25 | }
26 |
27 | resource "test_assertions" "overwrite_hash" {
28 |   component = "overwrite_hash"
29 |
30 |   equal "scheme" {
31 |     description = "can overwrite hashes"
32 |     got = module.mut_hash.merged
33 |     want = {
34 |       "apps" = {
35 |         "test_hash" = tomap({
36 |           "from_apps_1" = "from_apps_1"
37 |           "from_apps_2" = "from_apps_2"
38 |         })
39 |       }
40 |       "loc" = {
41 |         "test_hash" = tomap({
42 |           "from_apps_1" = "from_apps_1"
43 |           "from_apps_2" = "from_apps_2"
44 |         })
45 |       }
46 |       "ops" = {
47 |         "test_hash" = tomap({
48 |           "from_ops_1" = "from_ops_1"
49 |         })
50 |       }
51 |     }
52 |   }
53 | }
52 |
--------------------------------------------------------------------------------
/common/configuration/tests/non_string_values/test_list.tf:
--------------------------------------------------------------------------------
1 | # verifies that an env overrides an inherited list value wholesale,
2 | # i.e. ops replaces the whole list rather than appending elements
3 | module "mut_list" {
4 |   source = "../.."
5 |
6 |   configuration = {
7 |     apps = {
8 |       test_list = [
9 |         "from_apps_1",
10 |         "from_apps_2"
11 |       ]
12 |     }
13 |
14 |     ops = {
15 |       test_list = [
16 |         "from_ops_1"
17 |       ]
18 |     }
19 |
20 |     loc = {
21 |     }
22 |   }
23 |
24 |   base_key = "apps"
25 | }
26 |
27 | resource "test_assertions" "overwrite_list" {
28 |   component = "overwrite_list"
29 |
30 |   equal "scheme" {
31 |     description = "can overwrite lists"
32 |     got = module.mut_list.merged
33 |     want = {
34 |       "apps" = {
35 |         "test_list" = tolist([
36 |           "from_apps_1",
37 |           "from_apps_2",
38 |         ])
39 |       }
40 |       "loc" = {
41 |         "test_list" = tolist([
42 |           "from_apps_1",
43 |           "from_apps_2",
44 |         ])
45 |       }
46 |       "ops" = {
47 |         "test_list" = tolist([
48 |           "from_ops_1",
49 |         ])
50 |       }
51 |     }
52 |   }
53 | }
52 |
--------------------------------------------------------------------------------
/common/configuration/tests/non_string_values/test_object.tf:
--------------------------------------------------------------------------------
1 | # the module under test is instantiated in ./wrapper so the
2 | # configuration can be declared with object types using optional()
3 | module "mut_object" {
4 |   source = "./wrapper"
5 | }
6 |
7 | resource "test_assertions" "overwrite_object" {
8 |   component = "overwrite_object"
9 |
10 |   equal "scheme" {
11 |     description = "can overwrite objects"
12 |     got = module.mut_object.merged
13 |     want = {
14 |       "apps" = {
15 |         "test_list_object" = tolist([
16 |           {
17 |             "key" = "from_apps"
18 |           },
19 |         ])
20 |         "test_list_string" = tolist([
21 |           "from_apps",
22 |         ])
23 |         "test_map_string" = tomap({
24 |           "key" = "from_apps"
25 |         })
26 |         "test_object" = {
27 |           "key" = "from_apps"
28 |         }
29 |         "test_string" = "from_apps"
30 |       }
31 |       "loc" = {
32 |         "test_list_object" = tolist([
33 |           {
34 |             "key" = "from_loc"
35 |           },
36 |         ])
37 |         "test_list_string" = tolist([
38 |           "from_apps",
39 |         ])
40 |         "test_map_string" = tomap({
41 |           "key" = "from_apps"
42 |         })
43 |         "test_object" = {
44 |           "key" = "from_apps"
45 |         }
46 |         "test_string" = "from_apps"
47 |       }
48 |       "ops" = {
49 |         "test_list_object" = tolist([
50 |           {
51 |             "key" = "from_ops"
52 |           },
53 |         ])
54 |         "test_list_string" = tolist([
55 |           "from_ops",
56 |         ])
57 |         "test_map_string" = tomap({
58 |           "key" = "from_ops"
59 |         })
60 |         "test_object" = {
61 |           "key" = "from_ops"
62 |         }
63 |         "test_string" = "from_ops"
64 |       }
65 |     }
66 |   }
67 | }
66 |
--------------------------------------------------------------------------------
/common/configuration/tests/non_string_values/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     # built-in provider backing the test_assertions resources
4 |     test = {
5 |       source = "terraform.io/builtin/test"
6 |     }
7 |   }
8 |
9 |   required_version = ">= 1.3.0"
10 | }
10 |
--------------------------------------------------------------------------------
/common/configuration/tests/non_string_values/wrapper/main.tf:
--------------------------------------------------------------------------------
1 | # wrapper module so the test configuration can be declared with
2 | # object types using optional() attributes (requires Terraform >= 1.3.0)
3 | terraform {
4 |   required_version = ">= 1.3.0"
5 | }
6 |
7 | variable "test_configuration" {
8 |   type = map(object({
9 |     test_string = optional(string)
10 |     test_list_string = optional(list(string))
11 |     test_map_string = optional(map(string))
12 |     test_list_object = optional(list(object({
13 |       key = string
14 |     })))
15 |     test_object = optional(object({
16 |       key = string
17 |     }))
18 |   }))
19 |   description = "Map with per workspace module configuration."
20 |   default = {
21 |     apps = {
22 |       test_string = "from_apps"
23 |
24 |       test_list_string = [
25 |         "from_apps"
26 |       ]
27 |
28 |       test_map_string = {
29 |         key = "from_apps"
30 |       }
31 |
32 |       test_list_object = [{
33 |         key = "from_apps"
34 |       }]
35 |
36 |       test_object = {
37 |         key = "from_apps"
38 |       }
39 |     }
40 |
41 |     ops = {
42 |       test_string = "from_ops"
43 |
44 |       test_list_string = [
45 |         "from_ops"
46 |       ]
47 |
48 |       test_map_string = {
49 |         key = "from_ops"
50 |       }
51 |
52 |       test_list_object = [{
53 |         key = "from_ops"
54 |       }]
55 |
56 |       test_object = {
57 |         key = "from_ops"
58 |       }
59 |     }
60 |
61 |     # only sets test_list_object, everything else is inherited from apps
62 |     loc = {
63 |       test_list_object = [{
64 |         key = "from_loc"
65 |       }]
66 |     }
67 |   }
68 | }
69 |
70 | module "mut_object" {
71 |   source = "../../.."
72 |
73 |   configuration = var.test_configuration
74 |
75 |   base_key = "apps"
76 | }
77 |
78 | output "merged" {
79 |   value = module.mut_object.merged
80 | }
78 |
--------------------------------------------------------------------------------
/common/configuration/variables.tf:
--------------------------------------------------------------------------------
1 | variable "configuration" {
2 |   # map(any) so per-workspace values are not limited to strings;
3 |   # non-string values are exercised in tests/non_string_values
4 |   type = map(any)
5 |   description = "Map with per workspace cluster configuration."
6 | }
7 |
8 | variable "base_key" {
9 |   type = string
10 |   description = "The key in the configuration map all other keys inherit from."
11 | }
10 |
--------------------------------------------------------------------------------
/common/configuration/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | # minimum Terraform CLI version for the configuration module
3 | terraform {
4 |   required_version = ">= 0.12"
5 | }
5 |
--------------------------------------------------------------------------------
/common/metadata/main.tf:
--------------------------------------------------------------------------------
1 | # derives cluster name, domain, fqdn, labels and tags from the inputs
2 | locals {
3 |   name_prefix = var.name_prefix # e.g. kbst
4 |   workspace = var.workspace != "" ? var.workspace : terraform.workspace # ops or apps defaults to the selected terraform workspace
5 |   provider_name = var.provider_name # e.g. gcp
6 |   provider_region = var.provider_region # e.g. europe-west3
7 |   base_domain = var.base_domain # e.g. infra.example.com
8 |
9 |   name_delimiter = var.delimiter # use dash as delimiter by default
10 |   dns_delimiter = "." # dns uses a dot as delimiter
11 |
12 |   # [name_prefix]-[workspace]-[provider_region]
13 |   # e.g. kbst-ops-europe-west3
14 |   name_parts = [local.name_prefix, local.workspace, local.provider_region]
15 |
16 |   # compact() drops empty parts so unset components do not produce
17 |   # leading/trailing or doubled delimiters
18 |   name = join(local.name_delimiter, compact(local.name_parts))
19 |
20 |   # [provider_name].[base_domain]
21 |   # e.g. gcp.infra.example.com
22 |   split_base_domain = split(local.dns_delimiter, local.base_domain)
23 |
24 |   domain_parts = concat([local.provider_name], compact(local.split_base_domain))
25 |
26 |   domain = join(local.dns_delimiter, local.domain_parts)
27 |
28 |   # [name_prefix]-[workspace]-[provider_region].[provider_name].[base_domain]
29 |   # e.g. kbst-ops-europe-west3.gcp.infra.example.com
30 |   fqdn_parts = [local.name, local.domain]
31 |
32 |   fqdn = join(local.dns_delimiter, local.fqdn_parts)
33 |
34 |   # label_namespace is caller-provided because providers restrict
35 |   # which characters label keys may contain
36 |   labels = {
37 |     "${var.label_namespace}cluster_name" = local.name
38 |     "${var.label_namespace}cluster_domain" = local.domain
39 |     "${var.label_namespace}cluster_fqdn" = local.fqdn
40 |     "${var.label_namespace}cluster_workspace" = local.workspace
41 |     "${var.label_namespace}cluster_provider_name" = local.provider_name
42 |     "${var.label_namespace}cluster_provider_region" = local.provider_region
43 |   }
44 |
45 |   tags = [
46 |     local.name,
47 |     local.workspace,
48 |     local.provider_name,
49 |     local.provider_region,
50 |   ]
51 | }
47 |
48 |
--------------------------------------------------------------------------------
/common/metadata/outputs.tf:
--------------------------------------------------------------------------------
1 | # metadata values consumed by the cluster modules, see main.tf for
2 | # how each value is derived
3 | output "name" {
4 |   value = local.name
5 | }
6 |
7 | output "domain" {
8 |   value = local.domain
9 | }
10 |
11 | output "fqdn" {
12 |   value = local.fqdn
13 | }
14 |
15 | output "labels" {
16 |   value = local.labels
17 | }
18 |
19 | output "label_namespace" {
20 |   value = var.label_namespace
21 | }
22 |
23 | output "tags" {
24 |   value = local.tags
25 | }
24 |
25 |
--------------------------------------------------------------------------------
/common/metadata/tests/custom_delimiter/test.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | test = {
4 | source = "terraform.io/builtin/test"
5 | }
6 | }
7 | }
8 |
9 | module "mut" {
10 | source = "../.."
11 |
12 | name_prefix = "testn"
13 | base_domain = "testd.example.com"
14 | provider_name = "testp"
15 | provider_region = "testr"
16 | delimiter = ""
17 | }
18 |
19 | locals {
20 | exp_name = "testntest_custom_delimitertestr"
21 | exp_domain = "testp.testd.example.com"
22 | exp_fqdn = "testntest_custom_delimitertestr.testp.testd.example.com"
23 | exp_workspace = "test_custom_delimiter"
24 | exp_provider_name = "testp"
25 | exp_provider_region = "testr"
26 | }
27 |
28 | resource "test_assertions" "name" {
29 | component = "name"
30 |
31 | equal "scheme" {
32 | description = "name concatenates name_prefix, workspace and provider_region"
33 | got = module.mut.name
34 | want = local.exp_name
35 | }
36 | }
37 |
38 | resource "test_assertions" "domain" {
39 | component = "domain"
40 |
41 | equal "scheme" {
42 | description = "domain concatenates name and base_domain"
43 | got = module.mut.domain
44 | want = local.exp_domain
45 | }
46 | }
47 |
48 | resource "test_assertions" "fqdn" {
49 | component = "fqdn"
50 |
51 | equal "scheme" {
52 | description = "fqdn concatenates name and domain"
53 | got = module.mut.fqdn
54 | want = local.exp_fqdn
55 | }
56 | }
57 |
58 | resource "test_assertions" "labels" {
59 | component = "labels"
60 |
61 | equal "scheme" {
62 | description = "labels have correct key/value pairs"
63 | got = module.mut.labels
64 | want = {
65 | "kubestack.com/cluster_name" = local.exp_name
66 | "kubestack.com/cluster_domain" = local.exp_domain
67 | "kubestack.com/cluster_fqdn" = local.exp_fqdn
68 | "kubestack.com/cluster_workspace" = local.exp_workspace
69 | "kubestack.com/cluster_provider_name" = local.exp_provider_name
70 | "kubestack.com/cluster_provider_region" = local.exp_provider_region
71 | }
72 | }
73 | }
74 |
75 | resource "test_assertions" "label_namespace" {
76 | component = "label_namespace"
77 |
78 | equal "scheme" {
79 | description = "returns the used label_namespace"
80 | got = module.mut.label_namespace
81 | want = "kubestack.com/"
82 | }
83 | }
84 |
resource "test_assertions" "tags" {
  component = "tags"

  equal "scheme" {
    # description previously said "returns the used label_namespace",
    # copied from the label_namespace assertion; this asserts the tags list
    description = "tags contain name, workspace, provider_name and provider_region"
    got         = module.mut.tags
    want = [
      local.exp_name,
      local.exp_workspace,
      local.exp_provider_name,
      local.exp_provider_region,
    ]
  }
}
99 |
--------------------------------------------------------------------------------
/common/metadata/tests/custom_label_namespace/test.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | test = {
4 | source = "terraform.io/builtin/test"
5 | }
6 | }
7 | }
8 |
9 | module "mut" {
10 | source = "../.."
11 |
12 | name_prefix = "testn"
13 | base_domain = "testd.example.com"
14 | provider_name = "testp"
15 | provider_region = "testr"
16 | label_namespace = "testlns-"
17 | }
18 |
19 | locals {
20 | exp_name = "testn-test_custom_label_namespace-testr"
21 | exp_domain = "testp.testd.example.com"
22 | exp_fqdn = "testn-test_custom_label_namespace-testr.testp.testd.example.com"
23 | exp_workspace = "test_custom_label_namespace"
24 | exp_provider_name = "testp"
25 | exp_provider_region = "testr"
26 | exp_label_namespace = "testlns-"
27 | }
28 |
29 | resource "test_assertions" "labels" {
30 | component = "labels"
31 |
32 | equal "scheme" {
33 | description = "labels have correct key/value pairs"
34 | got = module.mut.labels
35 | want = {
36 | "${local.exp_label_namespace}cluster_name" = local.exp_name
37 | "${local.exp_label_namespace}cluster_domain" = local.exp_domain
38 | "${local.exp_label_namespace}cluster_fqdn" = local.exp_fqdn
39 | "${local.exp_label_namespace}cluster_workspace" = local.exp_workspace
40 | "${local.exp_label_namespace}cluster_provider_name" = local.exp_provider_name
41 | "${local.exp_label_namespace}cluster_provider_region" = local.exp_provider_region
42 | }
43 | }
44 | }
45 |
46 | resource "test_assertions" "label_namespace" {
47 | component = "label_namespace"
48 |
49 | equal "scheme" {
50 | description = "returns the used label_namespace"
51 | got = module.mut.label_namespace
52 | want = local.exp_label_namespace
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/common/metadata/tests/custom_workspace/test.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | test = {
4 | source = "terraform.io/builtin/test"
5 | }
6 | }
7 | }
8 |
9 | module "mut" {
10 | source = "../.."
11 |
12 | name_prefix = "testn"
13 | base_domain = "testd.example.com"
14 | provider_name = "testp"
15 | provider_region = "testr"
16 | workspace = "testw"
17 | }
18 |
19 | locals {
20 | exp_workspace = "testw"
21 | exp_name = "testn-${local.exp_workspace}-testr"
22 | }
23 |
24 | resource "test_assertions" "name" {
25 | component = "name"
26 |
27 | equal "scheme" {
28 | description = "name concatenates name_prefix, workspace and provider_region"
29 | got = module.mut.name
30 | want = local.exp_name
31 | }
32 | }
33 |
34 | resource "test_assertions" "workspace_label" {
35 | component = "workspace_label"
36 |
37 | equal "scheme" {
38 | description = "labels have correct key/value pairs"
39 | got = module.mut.labels["kubestack.com/cluster_workspace"]
40 | want = local.exp_workspace
41 | }
42 | }
43 |
44 | resource "test_assertions" "workspace_tag" {
45 | component = "workspace_tag"
46 |
47 | check "contains" {
48 | description = "check the workspace is one of the tags"
49 | condition = contains(module.mut.tags, local.exp_workspace)
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/common/metadata/tests/defaults/test.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | test = {
4 | source = "terraform.io/builtin/test"
5 | }
6 | }
7 | }
8 |
9 | module "mut" {
10 | source = "../.."
11 |
12 | name_prefix = "testn"
13 | base_domain = "testd.example.com"
14 | provider_name = "testp"
15 | provider_region = "testr"
16 | }
17 |
18 | locals {
19 | exp_name = "testn-test_defaults-testr"
20 | exp_domain = "testp.testd.example.com"
21 | exp_fqdn = "testn-test_defaults-testr.testp.testd.example.com"
22 | exp_workspace = "test_defaults"
23 | exp_provider_name = "testp"
24 | exp_provider_region = "testr"
25 | }
26 |
27 | resource "test_assertions" "name" {
28 | component = "name"
29 |
30 | equal "scheme" {
31 | description = "name concatenates name_prefix, workspace and provider_region"
32 | got = module.mut.name
33 | want = local.exp_name
34 | }
35 | }
36 |
37 | resource "test_assertions" "domain" {
38 | component = "domain"
39 |
40 | equal "scheme" {
41 | description = "domain concatenates name and base_domain"
42 | got = module.mut.domain
43 | want = local.exp_domain
44 | }
45 | }
46 |
47 | resource "test_assertions" "fqdn" {
48 | component = "fqdn"
49 |
50 | equal "scheme" {
51 | description = "fqdn concatenates name and domain"
52 | got = module.mut.fqdn
53 | want = local.exp_fqdn
54 | }
55 | }
56 |
57 | resource "test_assertions" "labels" {
58 | component = "labels"
59 |
60 | equal "scheme" {
61 | description = "labels have correct key/value pairs"
62 | got = module.mut.labels
63 | want = {
64 | "kubestack.com/cluster_name" = local.exp_name
65 | "kubestack.com/cluster_domain" = local.exp_domain
66 | "kubestack.com/cluster_fqdn" = local.exp_fqdn
67 | "kubestack.com/cluster_workspace" = local.exp_workspace
68 | "kubestack.com/cluster_provider_name" = local.exp_provider_name
69 | "kubestack.com/cluster_provider_region" = local.exp_provider_region
70 | }
71 | }
72 | }
73 |
74 | resource "test_assertions" "label_namespace" {
75 | component = "label_namespace"
76 |
77 | equal "scheme" {
78 | description = "returns the used label_namespace"
79 | got = module.mut.label_namespace
80 | want = "kubestack.com/"
81 | }
82 | }
83 |
resource "test_assertions" "tags" {
  component = "tags"

  equal "scheme" {
    # description previously said "returns the used label_namespace",
    # copied from the label_namespace assertion; this asserts the tags list
    description = "tags contain name, workspace, provider_name and provider_region"
    got         = module.mut.tags
    want = [
      local.exp_name,
      local.exp_workspace,
      local.exp_provider_name,
      local.exp_provider_region,
    ]
  }
}
98 |
--------------------------------------------------------------------------------
/common/metadata/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name_prefix" {
2 | type = string
3 | description = "String to prefix the name with."
4 | }
5 |
6 | variable "base_domain" {
7 | type = string
8 | description = "Domain to use for the cluster."
9 | }
10 |
11 | variable "provider_name" {
12 | type = string
13 | description = "Name of the cloud provider."
14 | }
15 |
16 | variable "provider_region" {
17 | type = string
18 | description = "Name of the region."
19 | }
20 |
21 | variable "workspace" {
22 | type = string
23 | description = "Name of the current workspace."
24 | default = ""
25 | }
26 |
27 | variable "delimiter" {
28 | type = string
29 | description = "Delimiter used between parts."
30 | default = "-"
31 | }
32 |
33 | variable "label_namespace" {
34 | type = string
35 | description = "Prefix labels are namespaced with."
36 | default = "kubestack.com/"
37 | }
38 |
39 |
--------------------------------------------------------------------------------
/common/metadata/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/google/_modules/gke/cluster.tf:
--------------------------------------------------------------------------------
# GKE cluster for this Kubestack environment.
# Node pools are managed separately (see node_pool.tf); the default
# node pool is removed when var.remove_default_node_pool is set.
resource "google_container_cluster" "current" {
  project = var.project
  name    = var.metadata_name

  deletion_protection = var.deletion_protection

  location       = var.location
  node_locations = var.node_locations

  min_master_version = var.min_master_version

  release_channel {
    channel = var.release_channel
  }

  remove_default_node_pool = var.remove_default_node_pool
  initial_node_count       = var.initial_node_count

  master_auth {
    client_certificate_config {
      # disable legacy client certificate authentication
      issue_client_certificate = false
    }
  }

  network = google_compute_network.current.self_link

  # workload identity is enabled by default,
  # unless explicitly disabled by the caller
  dynamic "workload_identity_config" {
    for_each = var.disable_workload_identity == false ? toset([1]) : toset([])
    content {
      workload_pool = "${var.project}.svc.id.goog"
    }
  }

  # application-layer secrets encryption, only when a KMS key is given
  dynamic "database_encryption" {
    for_each = var.cluster_database_encryption_key_name != null ? toset([1]) : toset([])
    content {
      state    = "ENCRYPTED"
      key_name = var.cluster_database_encryption_key_name
    }
  }

  #
  #
  # Addon config
  addons_config {
    # GCP HTTP load balancing is disabled, ingress is handled
    # via the address and DNS records in ingress.tf
    http_load_balancing {
      disabled = true
    }

    horizontal_pod_autoscaling {
      disabled = false
    }

    network_policy_config {
      disabled = false
    }

    dynamic "gcs_fuse_csi_driver_config" {
      for_each = var.enable_gcs_fuse_csi_driver != null ? [1] : []

      content {
        enabled = var.enable_gcs_fuse_csi_driver
      }
    }
  }

  network_policy {
    enabled = true
  }

  maintenance_policy {
    daily_maintenance_window {
      start_time = var.daily_maintenance_window_start_time
    }

    # optional maintenance exclusion window,
    # only when a start time is configured
    dynamic "maintenance_exclusion" {
      for_each = var.maintenance_exclusion_start_time != "" ? [1] : []

      content {
        start_time     = var.maintenance_exclusion_start_time
        end_time       = var.maintenance_exclusion_end_time
        exclusion_name = var.maintenance_exclusion_name

        exclusion_options {
          scope = var.maintenance_exclusion_scope
        }
      }
    }
  }

  # restrict access to the cluster API endpoint
  # to the given CIDR blocks, when configured
  dynamic "master_authorized_networks_config" {
    for_each = var.master_authorized_networks_config_cidr_blocks == null ? toset([]) : toset([1])

    content {
      dynamic "cidr_blocks" {
        for_each = var.master_authorized_networks_config_cidr_blocks

        content {
          cidr_block   = cidr_blocks.value
          display_name = "terraform-kubestack_${cidr_blocks.value}"
        }
      }
    }
  }

  logging_config {
    enable_components = var.logging_config_enable_components
  }

  monitoring_config {
    enable_components = var.monitoring_config_enable_components
  }

  private_cluster_config {
    enable_private_nodes = var.enable_private_nodes
    # the API endpoint stays publicly reachable,
    # only the nodes are private
    enable_private_endpoint = false
    master_ipv4_cidr_block  = var.master_cidr_block
  }

  # explicit pod/service ranges are only set for private clusters
  dynamic "ip_allocation_policy" {
    for_each = var.enable_private_nodes ? toset([1]) : []

    content {
      cluster_ipv4_cidr_block  = var.cluster_ipv4_cidr_block
      services_ipv4_cidr_block = var.services_ipv4_cidr_block
    }
  }

  enable_intranode_visibility = var.enable_intranode_visibility
  enable_tpu                  = var.enable_tpu
}
132 |
--------------------------------------------------------------------------------
/google/_modules/gke/cluster_role_binding.tf:
--------------------------------------------------------------------------------
# identity of the credentials the google provider currently runs with
data "google_client_openid_userinfo" "current" {
}

# bind cluster-admin to the identity applying Terraform, so the
# kubernetes provider resources below can be managed right after
# cluster creation
resource "kubernetes_cluster_role_binding" "current" {
  provider = kubernetes.gke

  metadata {
    name = "cluster-admin-kubestack"
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }

  subject {
    kind      = "User"
    name      = data.google_client_openid_userinfo.current.email
    api_group = "rbac.authorization.k8s.io"
  }

  # ensure the cluster exists before attempting any k8s API calls
  depends_on = [google_container_cluster.current]
}
25 |
--------------------------------------------------------------------------------
/google/_modules/gke/ingress.tf:
--------------------------------------------------------------------------------
1 | resource "google_compute_address" "current" {
2 | count = var.disable_default_ingress ? 0 : 1
3 |
4 | region = google_container_cluster.current.location
5 | project = var.project
6 |
7 | name = var.metadata_name
8 | }
9 |
10 | resource "google_dns_managed_zone" "current" {
11 | count = var.disable_default_ingress ? 0 : 1
12 |
13 | project = var.project
14 |
15 | name = var.metadata_name
16 | dns_name = "${var.metadata_fqdn}."
17 | }
18 |
19 | resource "google_dns_record_set" "host" {
20 | count = var.disable_default_ingress ? 0 : 1
21 |
22 | project = var.project
23 |
24 | name = google_dns_managed_zone.current[0].dns_name
25 | type = "A"
26 | ttl = 300
27 |
28 | managed_zone = google_dns_managed_zone.current[0].name
29 |
30 | rrdatas = [google_compute_address.current[0].address]
31 | }
32 |
33 | resource "google_dns_record_set" "wildcard" {
34 | count = var.disable_default_ingress ? 0 : 1
35 |
36 | project = var.project
37 |
38 | name = "*.${google_dns_managed_zone.current[0].dns_name}"
39 | type = "A"
40 | ttl = 300
41 |
42 | managed_zone = google_dns_managed_zone.current[0].name
43 |
44 | rrdatas = [google_compute_address.current[0].address]
45 | }
46 |
--------------------------------------------------------------------------------
/google/_modules/gke/kubeconfig.tf:
--------------------------------------------------------------------------------
locals {
  # values interpolated into templates/kubeconfig.tpl
  template_vars = {
    cluster_name     = google_container_cluster.current.name
    cluster_endpoint = google_container_cluster.current.endpoint
    cluster_ca       = google_container_cluster.current.master_auth[0].cluster_ca_certificate
    # NOTE(review): this embeds the current access token, which
    # presumably expires — the rendered kubeconfig is short-lived
    token            = data.google_client_config.default.access_token
  }

  # rendered kubeconfig, exposed via outputs.tf
  kubeconfig = templatefile("${path.module}/templates/kubeconfig.tpl", local.template_vars)
}
11 |
--------------------------------------------------------------------------------
/google/_modules/gke/network.tf:
--------------------------------------------------------------------------------
1 | resource "google_compute_network" "current" {
2 | name = var.metadata_name
3 | project = var.project
4 | auto_create_subnetworks = "true"
5 | }
6 |
7 | resource "google_compute_address" "nat" {
8 | count = var.enable_cloud_nat ? var.cloud_nat_ip_count : 0
9 |
10 | region = google_container_cluster.current.location
11 | project = var.project
12 |
13 | name = "nat-${var.metadata_name}-${count.index}"
14 | }
15 |
16 | resource "google_compute_router" "current" {
17 | count = var.enable_cloud_nat ? 1 : 0
18 |
19 | project = var.project
20 | name = var.metadata_name
21 | region = google_container_cluster.current.location
22 |
23 | network = google_compute_network.current.name
24 |
25 | bgp {
26 | advertise_mode = (
27 | var.router_advertise_config == null
28 | ? null
29 | : var.router_advertise_config.mode
30 | )
31 | advertised_groups = (
32 | var.router_advertise_config == null ? null : (
33 | var.router_advertise_config.mode != "CUSTOM"
34 | ? null
35 | : var.router_advertise_config.groups
36 | )
37 | )
38 | dynamic "advertised_ip_ranges" {
39 | for_each = (
40 | var.router_advertise_config == null ? {} : (
41 | var.router_advertise_config.mode != "CUSTOM"
42 | ? {}
43 | : var.router_advertise_config.ip_ranges
44 | )
45 | )
46 | iterator = range
47 | content {
48 | range = range.key
49 | description = range.value
50 | }
51 | }
52 |
53 | # expected "bgp.0.asn" to be a RFC6996-compliant Local ASN:
54 | # must be either in the private ASN ranges: [64512..65534], [4200000000..4294967294];
55 | # or be the value of [16550]
56 | asn = var.router_asn != null ? var.router_asn : 16550
57 | }
58 | }
59 |
60 | resource "google_compute_router_nat" "nat" {
61 | count = var.enable_cloud_nat ? 1 : 0
62 |
63 | project = var.project
64 | name = var.metadata_name
65 | region = google_compute_router.current[0].region
66 | router = google_compute_router.current[0].name
67 |
68 | enable_endpoint_independent_mapping = var.cloud_nat_endpoint_independent_mapping
69 | min_ports_per_vm = var.cloud_nat_min_ports_per_vm
70 | nat_ip_allocate_option = var.cloud_nat_ip_count > 0 ? "MANUAL_ONLY" : "AUTO_ONLY"
71 | source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
72 | nat_ips = var.cloud_nat_ip_count > 0 ? google_compute_address.nat.*.self_link : null
73 |
74 | log_config {
75 | enable = true
76 | filter = "ERRORS_ONLY"
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool.tf:
--------------------------------------------------------------------------------
1 | module "node_pool" {
2 | source = "./node_pool"
3 |
4 | project = var.project
5 | location = google_container_cluster.current.location
6 |
7 | cluster_name = google_container_cluster.current.name
8 | pool_name = "default"
9 |
10 | service_account_email = google_service_account.current.email
11 | disable_per_node_pool_service_account = true
12 |
13 | metadata_tags = var.metadata_tags
14 | metadata_labels = var.metadata_labels
15 |
16 | initial_node_count = var.initial_node_count
17 | min_node_count = var.min_node_count
18 | max_node_count = var.max_node_count
19 | location_policy = var.location_policy
20 |
21 | extra_oauth_scopes = var.extra_oauth_scopes
22 |
23 | disk_size_gb = var.disk_size_gb
24 | disk_type = var.disk_type
25 | image_type = var.image_type
26 | machine_type = var.machine_type
27 |
28 | # Whether to use preemptible nodes for this node pool
29 | preemptible = var.preemptible
30 | # Whether the nodes will be automatically repaired
31 | auto_repair = var.auto_repair
32 | # Whether the nodes will be automatically upgraded
33 | auto_upgrade = var.auto_upgrade
34 |
35 | node_workload_metadata_config = var.node_workload_metadata_config
36 |
37 | guest_accelerator = var.guest_accelerator
38 | }
39 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool/locals.tf:
--------------------------------------------------------------------------------
locals {
  # baseline oauth scopes for worker nodes: read-only storage access,
  # logging, monitoring, service control/management and tracing
  base_oauth_scopes = [
    "https://www.googleapis.com/auth/devstorage.read_only",
    "https://www.googleapis.com/auth/logging.write",
    "https://www.googleapis.com/auth/monitoring",
    "https://www.googleapis.com/auth/servicecontrol",
    "https://www.googleapis.com/auth/service.management.readonly",
    "https://www.googleapis.com/auth/trace.append",
  ]

  # compact() drops any empty strings callers pass in extra_oauth_scopes
  oauth_scopes = compact(concat(local.base_oauth_scopes, var.extra_oauth_scopes))
}
13 |
14 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool/main.tf:
--------------------------------------------------------------------------------
1 | resource "google_container_node_pool" "current" {
2 | name = var.pool_name
3 | project = var.project
4 | cluster = var.cluster_name
5 | location = var.location
6 |
7 | initial_node_count = var.initial_node_count
8 |
9 | autoscaling {
10 | min_node_count = var.min_node_count
11 | max_node_count = var.max_node_count
12 | location_policy = var.location_policy
13 | }
14 |
15 | node_locations = var.node_locations
16 |
17 | dynamic "network_config" {
18 | for_each = var.network_config == null ? [] : [1]
19 |
20 | content {
21 | enable_private_nodes = var.network_config["enable_private_nodes"]
22 | create_pod_range = var.network_config["create_pod_range"]
23 | pod_ipv4_cidr_block = var.network_config["pod_ipv4_cidr_block"]
24 | }
25 | }
26 |
27 | #
28 | #
29 | # Node config
30 | node_config {
31 | service_account = var.disable_per_node_pool_service_account ? var.service_account_email : google_service_account.current[0].email
32 |
33 | oauth_scopes = local.oauth_scopes
34 |
35 | disk_size_gb = var.disk_size_gb
36 | disk_type = var.disk_type
37 |
38 | image_type = var.image_type
39 | machine_type = var.machine_type
40 | preemptible = var.preemptible
41 |
42 | labels = merge(var.labels, var.metadata_labels)
43 |
44 | tags = concat(var.metadata_tags, var.instance_tags)
45 |
46 | workload_metadata_config {
47 | mode = var.node_workload_metadata_config
48 | }
49 |
50 | dynamic "guest_accelerator" {
51 | # Make sure to generate this only once
52 | for_each = var.guest_accelerator == null ? [] : [1]
53 |
54 | content {
55 | type = var.guest_accelerator.type
56 | count = var.guest_accelerator.count
57 |
58 | dynamic "gpu_sharing_config" {
59 | for_each = var.guest_accelerator.gpu_sharing_config == null ? [] : [1]
60 |
61 | content {
62 | gpu_sharing_strategy = var.guest_accelerator.gpu_sharing_config.gpu_sharing_strategy
63 | max_shared_clients_per_gpu = var.guest_accelerator.gpu_sharing_config.max_shared_clients_per_gpu
64 | }
65 | }
66 | }
67 | }
68 |
69 | dynamic "taint" {
70 | for_each = var.taints == null ? [] : var.taints
71 |
72 | content {
73 | key = taint.value["key"]
74 | value = taint.value["value"]
75 | effect = taint.value["effect"]
76 | }
77 | }
78 |
79 | dynamic "ephemeral_storage_local_ssd_config" {
80 | for_each = var.ephemeral_storage_local_ssd_config == null ? [] : [1]
81 |
82 | content {
83 | local_ssd_count = var.ephemeral_storage_local_ssd_config.local_ssd_count
84 | }
85 | }
86 | }
87 |
88 | management {
89 | auto_repair = var.auto_repair
90 | auto_upgrade = var.auto_upgrade
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool/outputs.tf:
--------------------------------------------------------------------------------
output "id" {
  # NOTE(review): this exposes the node pool *name*, not the resource's
  # `id` attribute; kept as-is for backward compatibility with callers.
  value       = google_container_node_pool.current.name
  description = "Name of the node pool (exposed under the `id` output for compatibility)."
}
5 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool/service_account.tf:
--------------------------------------------------------------------------------
locals {
  # [pool_name]-[cluster_name]
  account_id_prefix = join("-", [var.pool_name, var.cluster_name])
  # hash of the full prefix keeps truncated account_ids unique
  account_id_suffix = sha512(local.account_id_prefix)
  # 24 char prefix + "-" + 5 char hash suffix = max 30 chars,
  # staying within GCP's service account ID length limit
  account_id = "${substr(local.account_id_prefix, 0, 24)}-${substr(local.account_id_suffix, 0, 5)}"
}
6 |
7 | resource "google_service_account" "current" {
8 | count = var.disable_per_node_pool_service_account ? 0 : 1
9 |
10 | account_id = local.account_id
11 | project = var.project
12 | }
13 |
14 | resource "google_project_iam_member" "log_writer" {
15 | count = var.disable_per_node_pool_service_account ? 0 : 1
16 |
17 | project = var.project
18 | role = "roles/logging.logWriter"
19 | member = "serviceAccount:${google_service_account.current[0].email}"
20 | }
21 |
22 | resource "google_project_iam_member" "metric_writer" {
23 | count = var.disable_per_node_pool_service_account ? 0 : 1
24 |
25 | project = var.project
26 | role = "roles/monitoring.metricWriter"
27 | member = "serviceAccount:${google_service_account.current[0].email}"
28 | }
29 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool/variables.tf:
--------------------------------------------------------------------------------
1 | variable "project" {
2 | type = string
3 | description = "Project the cluster belongs to."
4 | }
5 |
6 | variable "cluster_name" {
7 | type = string
8 | description = "Name of the cluster for this node pool."
9 | }
10 |
11 | variable "metadata_tags" {
12 | type = list(string)
13 | description = "Metadata tags to use."
14 | }
15 |
16 | variable "metadata_labels" {
17 | type = map(string)
18 | description = "Metadata labels to use."
19 | }
20 |
21 | variable "pool_name" {
22 | description = "Name of the node pool."
23 | type = string
24 | }
25 |
26 | variable "location" {
27 | type = string
28 | description = "location of the cluster this node pool belongs to."
29 | }
30 |
variable "initial_node_count" {
  description = "Initial number of nodes for this node pool."
  # number instead of string; Terraform converts string inputs
  # like "3" automatically, so callers are unaffected
  type = number
}

variable "min_node_count" {
  description = "Min number of nodes for this node pool."
  type        = number
}

variable "max_node_count" {
  description = "Max number of nodes for this node pool."
  type        = number
}
45 |
46 | variable "location_policy" {
47 | type = string
48 | description = "Location policy specifies the algorithm used when scaling-up the node pool."
49 | }
50 |
51 | variable "service_account_email" {
52 | description = "The service account email to use for this node pool."
53 | type = string
54 | default = null
55 | }
56 |
variable "disable_per_node_pool_service_account" {
  description = "Skip creating a dedicated service account to use for this node pool."
  # bool instead of string; the default was already a bool literal and
  # Terraform converts "true"/"false" string inputs automatically
  type    = bool
  default = false
}
62 |
63 | variable "extra_oauth_scopes" {
64 | description = "List of additional oauth scopes for workers."
65 | type = list(string)
66 | }
67 |
68 | variable "disk_size_gb" {
69 | description = "The disk size of nodes in this pool."
70 | type = string
71 | }
72 |
73 | variable "disk_type" {
74 | description = "The disk type of nodes in this pool."
75 | type = string
76 | }
77 |
78 | variable "image_type" {
79 | description = "The image type for nodes in this pool."
80 | type = string
81 | default = "COS"
82 | }
83 |
84 | variable "machine_type" {
85 | description = "The machine type for nodes in this pool."
86 | type = string
87 | }
88 |
variable "preemptible" {
  description = "Whether to use preemptible nodes for this node pool."
  # bool instead of string; defaults were already bool literals and
  # Terraform converts "true"/"false" string inputs automatically
  type    = bool
  default = false
}

variable "auto_repair" {
  description = "Whether the nodes will be automatically repaired."
  type        = bool
  default     = true
}

variable "auto_upgrade" {
  description = "Whether the nodes will be automatically upgraded."
  type        = bool
  default     = true
}
106 |
107 | variable "node_workload_metadata_config" {
108 | description = "How to expose the node metadata to the workload running on the node."
109 | type = string
110 | }
111 |
112 | variable "taints" {
113 | type = set(object({
114 | key = string
115 | value = string
116 | effect = string
117 | }))
118 | description = "Taints to configure for the node pool."
119 | default = null
120 | }
121 |
122 | variable "instance_tags" {
123 | type = list(string)
124 | description = "List of instance tags to apply to nodes."
125 | default = []
126 | }
127 |
128 | variable "node_locations" {
129 | type = list(string)
130 | description = "List of zones in the cluster's region to start worker nodes in. Defaults to cluster's node locations."
131 | default = null
132 | }
133 |
134 | variable "guest_accelerator" {
135 | type = object({
136 | type = string
137 | count = number
138 | gpu_partition_size = optional(string)
139 | gpu_sharing_config = optional(object({
140 | gpu_sharing_strategy = optional(string)
141 | max_shared_clients_per_gpu = optional(number)
142 | }))
143 | })
144 | description = "`guest_accelerator` block supports during node_group creation, useful to provision GPU-capable nodes. Default to `null` or `{}` which will disable GPUs."
145 | default = null
146 | }
147 |
148 | variable "ephemeral_storage_local_ssd_config" {
149 | type = object({
150 | local_ssd_count = number
151 | })
152 | description = "`ephemeral_storage_local_ssd_config` block, useful for node groups with local SSD. Defaults to `null`"
153 | default = null
154 | }
155 |
156 | variable "labels" {
157 | type = map(string)
158 | description = "Kubernetes labels to set on the nodes created by the node pool. Merged with Kubestack default labels."
159 | default = {}
160 | }
161 |
162 | variable "network_config" {
163 | type = object({
164 | enable_private_nodes = bool
165 | create_pod_range = bool
166 | pod_ipv4_cidr_block = string
167 | })
168 | description = "Configure additional pod IP address range for the node pool. Defaults to `null`"
169 | default = null
170 | }
171 |
--------------------------------------------------------------------------------
/google/_modules/gke/node_pool/versions.tf:
--------------------------------------------------------------------------------
1 |
terraform {
  # variables.tf uses optional() object type attributes,
  # which require Terraform >= 1.3
  required_version = ">= 1.3"
}
5 |
--------------------------------------------------------------------------------
/google/_modules/gke/outputs.tf:
--------------------------------------------------------------------------------
output "kubeconfig" {
  description = "Rendered kubeconfig for the cluster, authenticated with the current provider access token."
  value       = local.kubeconfig

  # when the node pool is destroyed before the k8s namespaces
  # the namespaces get stuck in terminating
  depends_on = [module.node_pool]
}

output "default_ingress_ip" {
  description = "Address of the default ingress, null when disable_default_ingress is set."
  value       = length(google_compute_address.current) > 0 ? google_compute_address.current[0].address : null
}
12 |
--------------------------------------------------------------------------------
/google/_modules/gke/provider.tf:
--------------------------------------------------------------------------------
# current google provider credentials, used to obtain an access token
data "google_client_config" "default" {
}

# kubernetes provider authenticated against the GKE cluster
# with the google provider's access token
provider "kubernetes" {
  alias = "gke"

  host = "https://${google_container_cluster.current.endpoint}"
  cluster_ca_certificate = base64decode(
    google_container_cluster.current.master_auth[0].cluster_ca_certificate,
  )
  token = data.google_client_config.default.access_token
}
13 |
--------------------------------------------------------------------------------
/google/_modules/gke/service_account.tf:
--------------------------------------------------------------------------------
# cluster-wide service account, used by the default node pool
# (see node_pool.tf, disable_per_node_pool_service_account = true)
resource "google_service_account" "current" {
  # truncate to stay within the service account ID length limit
  account_id = substr(var.metadata_name, 0, 30)
  project    = var.project
}

# allow nodes to write logs
resource "google_project_iam_member" "log_writer" {
  project = var.project
  role    = "roles/logging.logWriter"
  member  = "serviceAccount:${google_service_account.current.email}"
}

# allow nodes to write monitoring metrics
resource "google_project_iam_member" "metric_writer" {
  project = var.project
  role    = "roles/monitoring.metricWriter"
  member  = "serviceAccount:${google_service_account.current.email}"
}
17 |
18 |
--------------------------------------------------------------------------------
/google/_modules/gke/templates/kubeconfig.tpl:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | clusters:
3 | - cluster:
4 | server: https://${cluster_endpoint}
5 | certificate-authority-data: ${cluster_ca}
6 | name: ${cluster_name}
7 | contexts:
8 | - context:
9 | cluster: ${cluster_name}
10 | user: ${cluster_name}
11 | name: ${cluster_name}
12 | current-context: ${cluster_name}
13 | kind: Config
14 | preferences: {}
15 | users:
16 | - name: ${cluster_name}
17 | user:
18 | token: ${token}
19 |
--------------------------------------------------------------------------------
/google/_modules/gke/versions.tf:
--------------------------------------------------------------------------------
1 |
terraform {
  required_providers {
    google = {
      # https://registry.terraform.io/providers/hashicorp/google/latest
      source  = "hashicorp/google"
      version = ">= 4.76.0"
    }

    kubernetes = {
      # no version constraint here; inherited from the root module
      source = "hashicorp/kubernetes"
    }
  }

  required_version = ">= 0.13"
}
17 |
--------------------------------------------------------------------------------
/google/cluster-local/configuration.tf:
--------------------------------------------------------------------------------
# Merge per workspace configuration following the Kubestack
# inheritance model.
module "configuration" {
  source = "../../common/configuration"

  configuration = var.configuration
  base_key      = var.configuration_base_key
}

locals {
  # current workspace config
  cfg = module.configuration.merged[terraform.workspace]

  name_prefix = local.cfg["name_prefix"]

  base_domain = local.cfg["base_domain"]

  # while we have the real region for GKE
  # we still hash and prefix it with gke-
  # to align with the local implementations
  # for AKS and EKS
  fake_region_hash = substr(sha256(local.cfg["region"]), 0, 7)
  fake_region      = "gke-${local.fake_region_hash}"

  # apps uses the standard ports, other workspaces the
  # unprivileged 8080/8443 alternatives
  http_port_default = terraform.workspace == "apps" ? 80 : 8080
  http_port         = lookup(local.cfg, "http_port", local.http_port_default)

  https_port_default = terraform.workspace == "apps" ? 443 : 8443
  https_port         = lookup(local.cfg, "https_port", local.https_port_default)

  disable_default_ingress = lookup(local.cfg, "disable_default_ingress", false)

  node_image = lookup(local.cfg, "node_image", null)

  # technically it should be min_node_count times number of AZs
  # but it seems better to keep node count low in the dev env
  node_count = lookup(local.cfg, "cluster_min_node_count", 1)
  # one "worker" entry per requested node; single-symbol for
  # expression instead of iterating index/value pairs of range()
  nodes       = [for _ in range(local.node_count) : "worker"]
  extra_nodes = join(",", local.nodes)
}
43 |
--------------------------------------------------------------------------------
/google/cluster-local/main.tf:
--------------------------------------------------------------------------------
# Naming, FQDN, tag and label metadata shared by all Kubestack modules.
module "cluster_metadata" {
  source = "../../common/metadata"

  name_prefix = local.name_prefix
  base_domain = local.base_domain

  provider_name   = "gcp"
  provider_region = local.fake_region
}

# Local development cluster; reuses the kind module so the GKE
# configuration can be simulated on localhost.
module "cluster" {
  source = "../../kind/_modules/kind"

  metadata_name   = module.cluster_metadata.name
  metadata_fqdn   = module.cluster_metadata.fqdn
  metadata_tags   = module.cluster_metadata.tags
  metadata_labels = module.cluster_metadata.labels

  node_image  = local.node_image
  extra_nodes = local.extra_nodes

  http_port  = local.http_port
  https_port = local.https_port

  disable_default_ingress = local.disable_default_ingress
}
27 |
--------------------------------------------------------------------------------
/google/cluster-local/outputs.tf:
--------------------------------------------------------------------------------
# Fully merged configuration of the currently selected workspace.
output "current_config" {
  value = module.configuration.merged[terraform.workspace]
}

# Name, FQDN, tags and labels of the current cluster.
output "current_metadata" {
  value = module.cluster_metadata
}

# Kubeconfig to connect to the local development cluster.
output "kubeconfig" {
  value = module.cluster.kubeconfig
}

output "default_ingress_ip" {
  # the cluster module returns an IP as a string
  # we YAML encode null for cluster-local to provide
  # a unified output to consumers
  value = yamlencode(null)
}
19 |
--------------------------------------------------------------------------------
/google/cluster-local/variables.tf:
--------------------------------------------------------------------------------
# Per workspace configuration as flat string maps; unset keys
# inherit from the base key.
variable "configuration" {
  type        = map(map(string))
  description = "Map with per workspace cluster configuration."
}

variable "configuration_base_key" {
  type        = string
  description = "The key in the configuration map all other keys inherit from."
  default     = "apps"
}
11 |
--------------------------------------------------------------------------------
/google/cluster-local/versions.tf:
--------------------------------------------------------------------------------
1 |
# Minimum Terraform version for the cluster-local configuration.
terraform {
  required_version = ">= 0.13"
}
5 |
--------------------------------------------------------------------------------
/google/cluster/configuration.tf:
--------------------------------------------------------------------------------
# Merge per workspace configuration following the Kubestack
# inheritance model.
module "configuration" {
  source = "../../common/configuration"

  configuration = var.configuration
  base_key      = var.configuration_base_key
}

locals {
  # current workspace config
  cfg = module.configuration.merged[terraform.workspace]

  name_prefix = local.cfg["name_prefix"]

  base_domain = local.cfg["base_domain"]

  project_id = local.cfg["project_id"]

  region = local.cfg["region"]

  deletion_protection = lookup(local.cfg, "deletion_protection", null)

  # NOTE(review): split(",", "") yields [""], not an empty list;
  # presumably the gke module treats that as no explicit node
  # locations - confirm before relying on it
  cluster_node_locations_lookup = lookup(local.cfg, "cluster_node_locations", "")
  cluster_node_locations        = split(",", local.cluster_node_locations_lookup)

  cluster_min_master_version = local.cfg["cluster_min_master_version"]
  cluster_release_channel    = lookup(local.cfg, "cluster_release_channel", "STABLE")

  cluster_daily_maintenance_window_start_time = lookup(
    local.cfg,
    "cluster_daily_maintenance_window_start_time",
    "03:00",
  )

  # maintenance exclusions are optional; empty strings mean unset
  cluster_maintenance_exclusion_start_time = lookup(local.cfg, "cluster_maintenance_exclusion_start_time", "")
  cluster_maintenance_exclusion_end_time   = lookup(local.cfg, "cluster_maintenance_exclusion_end_time", "")
  cluster_maintenance_exclusion_name       = lookup(local.cfg, "cluster_maintenance_exclusion_name", "")
  cluster_maintenance_exclusion_scope      = lookup(local.cfg, "cluster_maintenance_exclusion_scope", "")

  remove_default_node_pool = lookup(local.cfg, "remove_default_node_pool", true)

  cluster_initial_node_count = lookup(local.cfg, "cluster_initial_node_count", 1)

  cluster_min_node_count       = lookup(local.cfg, "cluster_min_node_count", 1)
  cluster_max_node_count       = lookup(local.cfg, "cluster_max_node_count", 1)
  cluster_node_location_policy = lookup(local.cfg, "cluster_node_location_policy", null)

  cluster_extra_oauth_scopes_lookup = lookup(local.cfg, "cluster_extra_oauth_scopes", "")
  cluster_extra_oauth_scopes        = split(",", local.cluster_extra_oauth_scopes_lookup)

  cluster_disk_size_gb = lookup(local.cfg, "cluster_disk_size_gb", 100)

  cluster_disk_type = lookup(local.cfg, "cluster_disk_type", "pd-standard")

  cluster_image_type = lookup(local.cfg, "cluster_image_type", null)

  cluster_machine_type = lookup(local.cfg, "cluster_machine_type", "")

  cluster_preemptible = lookup(local.cfg, "cluster_preemptible", false)

  cluster_auto_repair = lookup(local.cfg, "cluster_auto_repair", true)

  cluster_auto_upgrade = lookup(local.cfg, "cluster_auto_upgrade", true)

  disable_default_ingress = lookup(local.cfg, "disable_default_ingress", false)

  enable_private_nodes = lookup(local.cfg, "enable_private_nodes", true)
  master_cidr_block    = lookup(local.cfg, "master_cidr_block", "172.16.0.32/28")

  cluster_ipv4_cidr_block  = lookup(local.cfg, "cluster_ipv4_cidr_block", null)
  services_ipv4_cidr_block = lookup(local.cfg, "services_ipv4_cidr_block", null)

  cluster_database_encryption_key_name = lookup(local.cfg, "cluster_database_encryption_key_name", null)

  # by default include cloud_nat when private nodes are enabled
  enable_cloud_nat                       = lookup(local.cfg, "enable_cloud_nat", local.enable_private_nodes)
  cloud_nat_endpoint_independent_mapping = lookup(local.cfg, "cloud_nat_enable_endpoint_independent_mapping", null)
  cloud_nat_min_ports_per_vm             = lookup(local.cfg, "cloud_nat_min_ports_per_vm", null)
  cloud_nat_ip_count                     = lookup(local.cfg, "cloud_nat_ip_count", 0)

  # workload identity defaults to enabled; nodes then expose the
  # GKE_METADATA server unless explicitly overridden
  disable_workload_identity             = lookup(local.cfg, "disable_workload_identity", false)
  default_node_workload_metadata_config = tobool(local.disable_workload_identity) == false ? "GKE_METADATA" : "MODE_UNSPECIFIED"
  node_workload_metadata_config         = lookup(local.cfg, "node_workload_metadata_config", local.default_node_workload_metadata_config)

  # null disables master authorized networks entirely
  master_authorized_networks_config_cidr_blocks_lookup = lookup(local.cfg, "master_authorized_networks_config_cidr_blocks", null)
  master_authorized_networks_config_cidr_blocks        = local.master_authorized_networks_config_cidr_blocks_lookup == null ? null : split(",", local.master_authorized_networks_config_cidr_blocks_lookup)

  enable_intranode_visibility = lookup(local.cfg, "enable_intranode_visibility", false)
  enable_tpu                  = lookup(local.cfg, "enable_tpu", false)

  # compact() drops the empty string split() returns for unset values
  router_advertise_config_groups_lookup    = lookup(local.cfg, "router_advertise_config_groups", "")
  router_advertise_config_groups           = compact(split(",", local.router_advertise_config_groups_lookup))
  router_advertise_config_ip_ranges_lookup = lookup(local.cfg, "router_advertise_config_ip_ranges", "")
  router_advertise_config_ip_ranges        = compact(split(",", local.router_advertise_config_ip_ranges_lookup))
  router_advertise_config_mode             = lookup(local.cfg, "router_advertise_config_mode", null)
  router_asn                               = lookup(local.cfg, "router_asn", null)

  logging_config_enable_components_lookup = lookup(local.cfg, "logging_config_enable_components", "SYSTEM_COMPONENTS,WORKLOADS")
  logging_config_enable_components        = compact(split(",", local.logging_config_enable_components_lookup))

  monitoring_config_enable_components_lookup = lookup(local.cfg, "monitoring_config_enable_components", "SYSTEM_COMPONENTS")
  monitoring_config_enable_components        = compact(split(",", local.monitoring_config_enable_components_lookup))

  enable_gcs_fuse_csi_driver = lookup(local.cfg, "enable_gcs_fuse_csi_driver", null)
}
105 |
--------------------------------------------------------------------------------
/google/cluster/main.tf:
--------------------------------------------------------------------------------
# Naming, FQDN, tag and label metadata shared by all Kubestack modules.
module "cluster_metadata" {
  source = "../../common/metadata"

  name_prefix = local.name_prefix
  base_domain = local.base_domain

  provider_name   = "gcp"
  provider_region = local.region
}

# GKE cluster wired up from the merged workspace configuration
# in configuration.tf.
module "cluster" {
  source = "../_modules/gke"

  project = local.project_id

  deletion_protection = local.deletion_protection

  metadata_name   = module.cluster_metadata.name
  metadata_fqdn   = module.cluster_metadata.fqdn
  metadata_tags   = module.cluster_metadata.tags
  metadata_labels = module.cluster_metadata.labels

  location       = local.region
  node_locations = local.cluster_node_locations

  min_master_version = local.cluster_min_master_version
  release_channel    = local.cluster_release_channel

  daily_maintenance_window_start_time = local.cluster_daily_maintenance_window_start_time

  maintenance_exclusion_start_time = local.cluster_maintenance_exclusion_start_time
  maintenance_exclusion_end_time   = local.cluster_maintenance_exclusion_end_time
  maintenance_exclusion_name       = local.cluster_maintenance_exclusion_name
  maintenance_exclusion_scope      = local.cluster_maintenance_exclusion_scope

  remove_default_node_pool = local.remove_default_node_pool

  initial_node_count = local.cluster_initial_node_count
  min_node_count     = local.cluster_min_node_count
  max_node_count     = local.cluster_max_node_count
  location_policy    = local.cluster_node_location_policy

  extra_oauth_scopes = local.cluster_extra_oauth_scopes

  disk_size_gb = local.cluster_disk_size_gb
  disk_type    = local.cluster_disk_type
  image_type   = local.cluster_image_type
  machine_type = local.cluster_machine_type

  preemptible = local.cluster_preemptible

  auto_repair = local.cluster_auto_repair

  auto_upgrade = local.cluster_auto_upgrade

  disable_default_ingress = local.disable_default_ingress

  enable_private_nodes = local.enable_private_nodes
  master_cidr_block    = local.master_cidr_block

  cluster_ipv4_cidr_block  = local.cluster_ipv4_cidr_block
  services_ipv4_cidr_block = local.services_ipv4_cidr_block

  enable_cloud_nat                       = local.enable_cloud_nat
  cloud_nat_endpoint_independent_mapping = local.cloud_nat_endpoint_independent_mapping
  cloud_nat_ip_count                     = local.cloud_nat_ip_count

  master_authorized_networks_config_cidr_blocks = local.master_authorized_networks_config_cidr_blocks

  cloud_nat_min_ports_per_vm = local.cloud_nat_min_ports_per_vm

  disable_workload_identity     = local.disable_workload_identity
  node_workload_metadata_config = local.node_workload_metadata_config

  cluster_database_encryption_key_name = local.cluster_database_encryption_key_name

  enable_intranode_visibility = local.enable_intranode_visibility
  enable_tpu                  = local.enable_tpu

  # ip_ranges expects a map; values carry no meaning here, only keys
  router_advertise_config = {
    groups    = local.router_advertise_config_groups
    ip_ranges = { for ip in local.router_advertise_config_ip_ranges : ip => null }
    mode      = local.router_advertise_config_mode
  }
  router_asn = local.router_asn

  logging_config_enable_components    = local.logging_config_enable_components
  monitoring_config_enable_components = local.monitoring_config_enable_components

  enable_gcs_fuse_csi_driver = local.enable_gcs_fuse_csi_driver
}
92 |
--------------------------------------------------------------------------------
/google/cluster/node-pool/configuration.tf:
--------------------------------------------------------------------------------
# Merge per workspace node pool configuration following the
# Kubestack inheritance model.
module "configuration" {
  source = "../../../common/configuration"

  configuration = var.configuration
  base_key      = var.configuration_base_key
}

locals {
  # current workspace config
  cfg = module.configuration.merged[terraform.workspace]

  project_id = local.cfg["project_id"]

  # direct index access instead of the deprecated two-argument
  # lookup(); all attributes exist on the object typed cfg and
  # default to null when unset
  name = local.cfg["name"]

  location       = local.cfg["location"]
  node_locations = local.cfg["node_locations"]

  initial_node_count = local.cfg["initial_node_count"]
  min_node_count     = local.cfg["min_node_count"]
  max_node_count     = local.cfg["max_node_count"]
  location_policy    = local.cfg["location_policy"] != null ? local.cfg["location_policy"] : "BALANCED"

  disk_size_gb = local.cfg["disk_size_gb"]
  disk_type    = local.cfg["disk_type"]
  image_type   = local.cfg["image_type"]
  machine_type = local.cfg["machine_type"]

  # defaults applied when the attribute is unset (null)
  preemptible  = local.cfg["preemptible"] != null ? local.cfg["preemptible"] : false
  auto_repair  = local.cfg["auto_repair"] != null ? local.cfg["auto_repair"] : true
  auto_upgrade = local.cfg["auto_upgrade"] != null ? local.cfg["auto_upgrade"] : true

  taints = local.cfg["taints"]

  labels = local.cfg["labels"]

  extra_oauth_scopes = local.cfg["extra_oauth_scopes"] != null ? local.cfg["extra_oauth_scopes"] : []

  node_workload_metadata_config = local.cfg["node_workload_metadata_config"] != null ? local.cfg["node_workload_metadata_config"] : "GKE_METADATA"

  service_account_email = local.cfg["service_account_email"]

  ephemeral_storage_local_ssd_config = local.cfg["ephemeral_storage_local_ssd_config"]

  guest_accelerator = local.cfg["guest_accelerator"]
  network_config    = local.cfg["network_config"]

  instance_tags = local.cfg["instance_tags"] != null ? local.cfg["instance_tags"] : []
}
50 |
--------------------------------------------------------------------------------
/google/cluster/node-pool/main.tf:
--------------------------------------------------------------------------------
# Additional GKE node pool attached to the cluster identified by
# var.cluster_metadata.
module "node_pool" {
  source = "../../_modules/gke/node_pool"

  project = local.project_id

  location       = local.location
  node_locations = local.node_locations

  cluster_name = var.cluster_metadata["name"]
  pool_name    = local.name

  metadata_tags   = var.cluster_metadata["tags"]
  metadata_labels = var.cluster_metadata["labels"]

  initial_node_count = local.initial_node_count
  min_node_count     = local.min_node_count
  max_node_count     = local.max_node_count
  location_policy    = local.location_policy

  extra_oauth_scopes = local.extra_oauth_scopes

  disk_size_gb = local.disk_size_gb
  disk_type    = local.disk_type
  image_type   = local.image_type
  machine_type = local.machine_type

  preemptible  = local.preemptible
  auto_repair  = local.auto_repair
  auto_upgrade = local.auto_upgrade

  node_workload_metadata_config = local.node_workload_metadata_config

  taints        = local.taints
  instance_tags = local.instance_tags

  labels = local.labels

  service_account_email = local.service_account_email
  # skip creating a per node pool service account when an explicit
  # email is configured; direct comparison instead of a ternary
  # that spells out false/true
  disable_per_node_pool_service_account = local.service_account_email != null

  ephemeral_storage_local_ssd_config = local.ephemeral_storage_local_ssd_config

  guest_accelerator = local.guest_accelerator

  network_config = local.network_config
}
47 |
--------------------------------------------------------------------------------
/google/cluster/node-pool/variables.tf:
--------------------------------------------------------------------------------
# Per workspace node pool configuration; unset (null) attributes
# inherit from the base key via the common/configuration module.
variable "configuration" {
  type = map(object({
    project_id = optional(string)

    name = optional(string)

    location        = optional(string)
    node_locations  = optional(list(string))
    location_policy = optional(string)

    # node counts are typed string, presumably for compatibility
    # with the original map(string) configuration format - confirm
    initial_node_count = optional(string)
    min_node_count     = optional(string)
    max_node_count     = optional(string)

    disk_size_gb = optional(string)
    disk_type    = optional(string)
    image_type   = optional(string)
    machine_type = optional(string)

    preemptible  = optional(bool)
    auto_repair  = optional(bool)
    auto_upgrade = optional(bool)

    taints = optional(set(object({
      key    = string
      value  = string
      effect = string
    })))

    labels = optional(map(string))

    extra_oauth_scopes = optional(list(string))

    node_workload_metadata_config = optional(string)

    service_account_email = optional(string)

    ephemeral_storage_local_ssd_config = optional(object({
      local_ssd_count = number
    }))

    guest_accelerator = optional(object({
      type               = string
      count              = number
      gpu_partition_size = optional(string)
      gpu_sharing_config = optional(object({
        gpu_sharing_strategy       = optional(string)
        max_shared_clients_per_gpu = optional(number)
      }))
    }))

    network_config = optional(object({
      enable_private_nodes = bool
      create_pod_range     = bool
      pod_ipv4_cidr_block  = string
    }))

    instance_tags = optional(list(string))
  }))

  description = "Map with per workspace cluster configuration."
}

variable "configuration_base_key" {
  type        = string
  description = "The key in the configuration map all other keys inherit from."
  default     = "apps"
}

variable "cluster_metadata" {
  type        = any
  description = "Metadata of the cluster to attach the node pool to."
}
74 |
--------------------------------------------------------------------------------
/google/cluster/node-pool/versions.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    # the local name must be "google"; it was previously "aws",
    # which mismatched the hashicorp/google source and the google_*
    # resources of the called modules
    google = {
      source = "hashicorp/google"
    }
  }

  required_version = ">= 1.3.0"
}
10 |
--------------------------------------------------------------------------------
/google/cluster/outputs.tf:
--------------------------------------------------------------------------------
# Fully merged configuration of the currently selected workspace.
output "current_config" {
  value = module.configuration.merged[terraform.workspace]
}

# Name, FQDN, tags and labels of the current cluster.
output "current_metadata" {
  value = module.cluster_metadata
}

# Kubeconfig to connect to the GKE cluster.
output "kubeconfig" {
  value = module.cluster.kubeconfig
}

# Reserved IP of the default ingress, passed through from the module.
output "default_ingress_ip" {
  value = module.cluster.default_ingress_ip
}
16 |
--------------------------------------------------------------------------------
/google/cluster/variables.tf:
--------------------------------------------------------------------------------
# Per workspace configuration as flat string maps; unset keys
# inherit from the base key.
variable "configuration" {
  type        = map(map(string))
  description = "Map with per workspace cluster configuration."
}

variable "configuration_base_key" {
  type        = string
  description = "The key in the configuration map all other keys inherit from."
  default     = "apps"
}
11 |
--------------------------------------------------------------------------------
/google/cluster/versions.tf:
--------------------------------------------------------------------------------
1 |
# Providers required by this configuration; version constraints
# come from the called gke module.
terraform {
  required_providers {
    google = {
      source = "hashicorp/google"
    }

    kubernetes = {
      source = "hashicorp/kubernetes"
    }
  }
  required_version = ">= 0.13"
}
14 |
--------------------------------------------------------------------------------
/kind/_modules/kind/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | kubeadm_config_patches = [
3 | <> /etc/passwd
15 | echo "kbst:x:${gid}:" >> /etc/group
16 |
17 | # if docker.sock is mounted, add our user to the socket's group
18 | if test -S "/var/run/docker.sock"; then
19 | echo "docker:x:$(stat -c '%g' /var/run/docker.sock):kbst" >> /etc/group
20 | fi
21 |
22 | # stop running as root to preserve volume mount file ownership
23 | exec runuser -u kbst -- entrypoint_user "$@"
24 |
--------------------------------------------------------------------------------
/oci/entrypoint_user:
--------------------------------------------------------------------------------
#!/bin/sh
# Configure cloud CLI credentials from base64 encoded KBST_AUTH_*
# environment variables, then exec the wrapped command without them.
set -e

#
#
# AWS auth
# only if aws cli is installed
if [ -x "$(command -v aws)" ]; then
  AWS_CONFIG_PATH=~/.aws
  mkdir -p "$AWS_CONFIG_PATH"

  # handle base64 encoded AWS credentials
  if [ -n "$KBST_AUTH_AWS" ]; then
    echo "$KBST_AUTH_AWS" | base64 --decode > "$AWS_CONFIG_PATH/credentials"
    # disable EC2 metadata lookup so only the credentials file
    # we just wrote is used to verify the identity
    AWS_EC2_METADATA_DISABLED=true aws sts get-caller-identity
  fi
fi

#
#
# Azure auth
# only if az cli is installed
if [ -x "$(command -v az)" ]; then
  AZ_CONFIG_PATH=~/.azure
  mkdir -p "$AZ_CONFIG_PATH"
  # ensure the file exists so sourcing it below never fails
  touch "$AZ_CONFIG_PATH/KBST_AUTH_AZ"

  # handle base64 encoded AZ credentials
  if [ -n "$KBST_AUTH_AZ" ]; then
    echo "$KBST_AUTH_AZ" | base64 --decode > "$AZ_CONFIG_PATH/KBST_AUTH_AZ"
    . "$AZ_CONFIG_PATH/KBST_AUTH_AZ"
    # quote credentials; secrets may contain shell special characters
    az login --service-principal --username "$ARM_CLIENT_ID" --password "$ARM_CLIENT_SECRET" --tenant "$ARM_TENANT_ID"
  fi

  # always source and export the ARM_ env variables
  # required by the azurerm Terraform provider
  . "$AZ_CONFIG_PATH/KBST_AUTH_AZ"
  export ARM_ACCESS_KEY
  export ARM_CLIENT_ID
  export ARM_CLIENT_SECRET
  export ARM_SUBSCRIPTION_ID
  export ARM_TENANT_ID
fi

#
#
# Gcloud auth
# only if gcloud cli is installed
if [ -x "$(command -v gcloud)" ]; then
  GCLOUD_CONFIG_PATH=~/.config/gcloud
  mkdir -p "$GCLOUD_CONFIG_PATH"

  # handle base64 encoded GCLOUD credentials
  if [ -n "$KBST_AUTH_GCLOUD" ]; then
    echo "$KBST_AUTH_GCLOUD" | base64 --decode > "$GCLOUD_CONFIG_PATH/application_default_credentials.json"
    gcloud auth activate-service-account --key-file "$GCLOUD_CONFIG_PATH/application_default_credentials.json"
  fi
fi

# do not have KBST_AUTH_* env vars set in runtime env
unset KBST_AUTH_AWS
unset KBST_AUTH_AZ
unset KBST_AUTH_GCLOUD

exec "$@"
--------------------------------------------------------------------------------
/quickstart/build_artifacts/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 |
6 | [dev-packages]
7 |
8 | [packages]
9 | jinja2 = "*"
10 |
11 | [requires]
12 | python_version = "3"
13 |
--------------------------------------------------------------------------------
/quickstart/build_artifacts/dist.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from os import environ, listdir, mkdir
4 | from os.path import isdir, exists, join
5 | from shutil import copytree, make_archive, rmtree
6 | from sys import argv, exit
7 |
8 | from jinja2 import Environment, FileSystemLoader
9 |
10 | SRCDIR = "../src"
11 | DISTDIR = "../_dist"
12 | ARTIFACT_PREFIX = "kubestack-starter-"
13 |
14 |
def replace_template(dist_path, file_name, context):
    """Render the Jinja2 template dist_path/file_name in place.

    The file itself is the template and is overwritten with the
    rendered result, so this must only run on dist copies, never
    on the source tree.
    """
    jinja = Environment(loader=FileSystemLoader(dist_path))
    template = jinja.get_template(file_name)
    data = template.render(context)

    with open(f"{dist_path}/{file_name}", "w") as f:
        f.write(data)
        # always include newline at end of file
        f.write("\n")
24 |
25 |
def dist(version, image_name, configuration):
    """Build the dist tree for one starter configuration.

    Copies the configuration and shared manifests into DISTDIR,
    then renders the version into *.tf files and the image
    coordinates into any Dockerfiles present.
    """
    configuration_src = f"{SRCDIR}/configurations/{configuration}"
    configuration_dist = f"{DISTDIR}/{ARTIFACT_PREFIX}{configuration}"
    manifests_src = f"{SRCDIR}/manifests"
    manifests_dist = f"{configuration_dist}/manifests"

    # start from a clean dist directory
    if isdir(configuration_dist):
        rmtree(configuration_dist)

    # copy configuration and shared manifests
    copytree(configuration_src, configuration_dist)
    copytree(manifests_src, manifests_dist)

    # render {{version}} into every *.tf file
    tf_files = (n for n in listdir(configuration_dist) if n.endswith(".tf"))
    for tf_file in tf_files:
        replace_template(configuration_dist, tf_file, {"version": version})

    # render image coordinates into the Dockerfiles that exist
    docker_context = {"image_name": image_name, "image_tag": version}
    for dockerfile in ("Dockerfile", "Dockerfile.loc"):
        if exists(join(configuration_dist, dockerfile)):
            replace_template(configuration_dist, dockerfile, docker_context)
53 |
54 |
def compress(version, configuration):
    """Zip one starter's dist tree into a versioned archive in DISTDIR."""
    starter = ARTIFACT_PREFIX + configuration
    make_archive(f"{DISTDIR}/{starter}-{version}", "zip", DISTDIR, starter)
59 |
60 |
if __name__ == "__main__":
    # Use tag as version, fallback to commit sha
    version = environ.get("GIT_SHA")
    # Non tagged images go to a different image repository
    image_name = "ghcr.io/kbst/terraform-kubestack/dev"

    # GIT_REF may be unset, e.g. when run outside CI; default to ""
    # so startswith() below does not raise AttributeError on None
    gitref = environ.get("GIT_REF", "")
    if gitref.startswith("refs/tags/"):
        version = gitref.replace("refs/tags/", "")
        # Tagged releases go to main image repository
        image_name = "kubestack/framework"

    try:
        target = argv[1]
    except IndexError:
        print("positional arg: 'target' missing:")
        exit("usage dist.py [dist | compress]")

    # configurations are the non-underscore-prefixed src directories
    configurations = [
        n for n in listdir(f"{SRCDIR}/configurations") if not n.startswith("_")
    ]

    if target not in ["dist", "compress"]:
        exit("usage dist.py [dist | compress]")

    for configuration in configurations:
        if target == "dist":
            dist(version, image_name, configuration)
            continue

        if target == "compress":
            compress(version, configuration)
            continue
94 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/_shared/.user/README.md:
--------------------------------------------------------------------------------
1 | # What is `.user` for?
2 |
3 | `.user` is used as `$HOME`, so that for example the cloud CLIs have a place to store temporary data and configuration. This directory is in `.gitignore` so that changes are not committed but by also being under the mounted volume, still can be used to make files created at runtime available to the host easily.
4 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/_shared/README.md:
--------------------------------------------------------------------------------
1 | # Welcome to Kubestack
2 |
3 | This repository uses [Kubestack][1]. Kubestack is the open source GitOps framework for teams that want to automate infrastructure, not reinvent automation.
4 |
5 | - Cluster infrastructure and cluster services are defined using Terraform modules.
6 | - Popular cluster services are available from the Terraform module [catalog][2].
7 | - Both cluster and cluster service modules follow the Kubestack [inheritance model][3] to prevent configuration drift between environments.
8 | - All changes follow the same four-step process.
9 |
10 | Full [framework documentation][4] is available online.
11 |
12 | ## Making changes
13 |
14 | To make changes to the Kubernetes cluster(s), supporting infrastructure or the Kubernetes services defined in this repository follow the Kubestack [GitOps process][5]. The GitOps process ensures that changes are safely applied by first reviewing the proposed changes, then validating the changes against the _ops_ environment and finally promoting the changes to be applied against the _apps_ environment by setting a tag.
15 |
16 | To accelerate the developer workflow an auto-updating [development environment][6] can be run on localhost using the `kbst local apply` command.
17 |
18 | 1. Change
19 |
20 | Make changes to the configuration in a new branch. Commit the changed configuration and push your branch. The pipeline runs `terraform plan` against the _ops_ workspace.
21 |
22 | ```shell
23 | # checkout a new branch from main
24 | git checkout -b examplechange main
25 |
26 | # make your changes
27 |
28 | # commit your changes
29 | git commit # write a meaningful commit message
30 |
31 | # push your changes
32 | git push origin examplechange
33 | ```
34 |
35 | 1. Review
36 |
37 | Request a peer review of your changes. Team members review the changes and the Terraform plan. If reviewers require changes, make additional commits in the branch.
38 |
39 | ```shell
40 | # make sure you're in the correct branch
41 | git checkout examplechange
42 |
43 | # make changes required by the review
44 |
45 | # commit and push the required changes
46 | git commit # write a meaningful commit message
47 | git push origin examplechange
48 | ```
49 |
50 | 1. Merge
51 |
    If approved, merge your changes into main to apply them against the _ops_ environment. After the apply against _ops_ succeeds, the pipeline runs a Terraform plan against the _apps_ environment.
53 |
54 | ```shell
55 | # you can merge on the commandline
56 | # or by merging a pull request
57 | git checkout main
58 | git merge examplechange
59 | git push origin main
60 | ```
61 |
62 | 1. Promote
63 |
64 | Review the previous _apps_ environment plan and tag the merge commit to promote the same changes to the _apps_ environment.
65 |
66 | ```shell
67 | # make sure you're on the correct commit
68 | git checkout main
69 | git pull
70 | git log -1
71 |
72 | # if correct, tag the current commit
73 | # any tag prefixed with `apps-deploy-`
74 | # will trigger the pipeline
75 | git tag apps-deploy-$(date -I)-0
76 |
77 | # in case of multiple deploys on the same day,
78 | # increase the counter
79 | # e.g. git tag apps-deploy-2020-05-14-1
80 | ```
81 |
82 | ## Manual operations
83 |
84 | If the automation is unavailable, an upgrade requires manual steps, or you are in a disaster recovery scenario, run Terraform and the cloud CLI locally. Kubestack provides container images bundling all dependencies to use for both automated and manual operations.
85 |
86 | 1. Exec into container
87 |
88 | ```shell
89 | # Build the container image
90 | docker build -t kubestack .
91 |
92 | # Exec into the container image
93 | # add docker socket mount for local dev
94 | # -v /var/run/docker.sock:/var/run/docker.sock
95 | docker run --rm -ti \
96 | -v `pwd`:/infra \
97 | kubestack
98 | ```
99 |
100 | 1. Authenticate providers
101 |
102 | Credentials are cached inside the `.user` directory. The directory is excluded from Git by the default `.gitignore`.
103 |
104 | ```shell
105 | # for AWS
106 | aws configure
107 |
108 | # for Azure
109 | az login
110 |
111 | # for GCP
112 | gcloud init
113 | gcloud auth application-default login
114 | ```
115 |
116 | 1. Select desired environment
117 |
118 | ```shell
119 | # for ops
120 | terraform workspace select ops
121 |
122 | # or for apps
123 | terraform workspace select apps
124 | ```
125 |
126 | 1. Run Terraform commands
127 |
128 | ```shell
129 | # run terraform init
130 | terraform init
131 |
132 | # run, e.g. terraform plan
133 | terraform plan
134 | ```
135 |
136 | [1]: https://www.kubestack.com
137 | [2]: https://www.kubestack.com/catalog
138 | [3]: https://www.kubestack.com/framework/documentation/inheritance-model
139 | [4]: https://www.kubestack.com/framework/documentation
140 | [5]: https://www.kubestack.com/framework/documentation/gitops-process
141 | [6]: https://www.kubestack.com/framework/documentation/tutorial-develop-locally#provision-local-clusters
142 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/_shared/tpl_gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # .user home directory
9 | .user/
10 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/.gitignore:
--------------------------------------------------------------------------------
1 | ../_shared/tpl_gitignore
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/.user:
--------------------------------------------------------------------------------
1 | ../_shared/.user/
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-aks
2 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/Dockerfile.loc:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-kind
2 |
3 | ARG UID
4 | ARG GID
5 |
6 | RUN mkdir -p /infra/terraform.tfstate.d &&\
7 | chown ${UID}:${GID} -R /infra
8 |
9 | COPY manifests /infra/manifests
10 | COPY *.tf *.tfvars /infra/
11 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/README.md:
--------------------------------------------------------------------------------
1 | ../_shared/README.md
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/aks_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "aks_zero" {
2 | source = "github.com/kbst/terraform-kubestack//azurerm/cluster?ref={{version}}"
3 |
4 | configuration = {
5 |     # apps environment configuration
6 | apps = {
7 | # Set name_prefix used to generate the cluster_name
8 | # [name_prefix]-[workspace]-[region]
9 | # e.g. name_prefix = kbst becomes: `kbst-apps-eu-west-1`
10 | # for small orgs the name works well
11 | # for bigger orgs consider department or team names
12 | name_prefix = ""
13 |
14 | # Set the base_domain used to generate the FQDN of the cluster
15 | # [cluster_name].[provider_name].[base_domain]
16 | # e.g. kbst-apps-eu-west-1.aws.infra.example.com
17 | base_domain = ""
18 |
19 | # The Azure resource group to use
20 | resource_group = ""
21 |
22 | # CNI/Advanced networking configuration parameters.
23 | # Leave commented for default 'kubenet' networking
24 | # vnet_address_space = "10.16.0.0/12" # accepts multiple comma-separated values
25 | # subnet_address_prefixes = "10.18.0.0/16" # accepts multiple comma-separated values
26 | # subnet_service_endpoints = null # accepts multiple comma-separated values
27 |
28 | # network_plugin = "azure"
29 | # network_policy = "azure"
30 | # service_cidr = "10.0.0.0/16"
31 | # dns_service_ip = "10.0.0.10"
32 | # max_pods = 30
33 | }
34 |
35 | # ops environment, inherits from apps
36 | ops = {}
37 |
38 | # loc environment, inherits from apps
39 | loc = {}
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/aks_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | features {}
3 | }
4 |
5 | provider "kustomization" {
6 | alias = "aks_zero"
7 | kubeconfig_raw = module.aks_zero.kubeconfig
8 | }
9 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/aks/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | kustomization = {
4 | source = "kbst/kustomization"
5 | }
6 | }
7 |
8 | required_version = ">= 0.15"
9 | }
10 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/.gitignore:
--------------------------------------------------------------------------------
1 | ../_shared/tpl_gitignore
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/.user:
--------------------------------------------------------------------------------
1 | ../_shared/.user/
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-eks
2 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/Dockerfile.loc:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-kind
2 |
3 | ARG UID
4 | ARG GID
5 |
6 | RUN mkdir -p /infra/terraform.tfstate.d &&\
7 | chown ${UID}:${GID} -R /infra
8 |
9 | COPY manifests /infra/manifests
10 | COPY *.tf *.tfvars /infra/
11 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/README.md:
--------------------------------------------------------------------------------
1 | ../_shared/README.md
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/eks_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "eks_zero" {
2 | providers = {
3 | aws = aws.eks_zero
4 | kubernetes = kubernetes.eks_zero
5 | }
6 |
7 | source = "github.com/kbst/terraform-kubestack//aws/cluster?ref={{version}}"
8 |
9 | configuration = {
10 | # apps environment
11 | apps = {
12 | # Set name_prefix used to generate the cluster_name
13 | # [name_prefix]-[workspace]-[region]
14 | # e.g. name_prefix = kbst becomes: `kbst-apps-eu-west-1`
15 | # for small orgs the name works well
16 | # for bigger orgs consider department or team names
17 | name_prefix = ""
18 |
19 | # Set the base_domain used to generate the FQDN of the cluster
20 | # [cluster_name].[provider_name].[base_domain]
21 | # e.g. kbst-apps-eu-west-1.aws.infra.example.com
22 | base_domain = ""
23 |
24 | cluster_instance_type = "t3.small"
25 | cluster_desired_capacity = "1"
26 | cluster_min_size = "1"
27 | cluster_max_size = "3"
28 |
29 | # Comma-separated list of zone names to deploy worker nodes in
30 | # EKS requires a min. of 2 zones
31 | # Must match region set in provider
32 | # e.g. cluster_availability_zones = "eu-west-1a,eu-west-1b,eu-west-1c"
33 |       # FIXME: use an actual list once the configuration map supports non-string values
34 | cluster_availability_zones = ""
35 | }
36 |
37 | # ops environment, inherits from apps
38 | ops = {}
39 |
40 | # loc environment, inherits from apps
41 | loc = {}
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/eks_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | alias = "eks_zero"
3 |
4 | # The AWS provider requires a region. Specify your region here,
5 | # the alias above is used to inject the correct provider into
6 | # the respective cluster module in clusters.tf
7 | region = ""
8 | }
9 |
10 | provider "kustomization" {
11 | alias = "eks_zero"
12 | kubeconfig_raw = module.eks_zero.kubeconfig
13 | }
14 |
15 | locals {
16 | eks_zero_kubeconfig = yamldecode(module.eks_zero.kubeconfig)
17 | }
18 |
19 | provider "kubernetes" {
20 | alias = "eks_zero"
21 |
22 | host = local.eks_zero_kubeconfig["clusters"][0]["cluster"]["server"]
23 | cluster_ca_certificate = base64decode(local.eks_zero_kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"])
24 | token = local.eks_zero_kubeconfig["users"][0]["user"]["token"]
25 | }
26 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/eks/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | kubernetes = {
4 | source = "hashicorp/kubernetes"
5 | }
6 |
7 | kustomization = {
8 | source = "kbst/kustomization"
9 | }
10 | }
11 |
12 | required_version = ">= 0.15"
13 | }
14 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/.gitignore:
--------------------------------------------------------------------------------
1 | ../_shared/tpl_gitignore
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/.user:
--------------------------------------------------------------------------------
1 | ../_shared/.user/
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-gke
2 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/Dockerfile.loc:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-kind
2 |
3 | ARG UID
4 | ARG GID
5 |
6 | RUN mkdir -p /infra/terraform.tfstate.d &&\
7 | chown ${UID}:${GID} -R /infra
8 |
9 | COPY manifests /infra/manifests
10 | COPY *.tf *.tfvars /infra/
11 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/README.md:
--------------------------------------------------------------------------------
1 | ../_shared/README.md
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/gke_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "gke_zero" {
2 | providers = {
3 | kubernetes = kubernetes.gke_zero
4 | }
5 |
6 | source = "github.com/kbst/terraform-kubestack//google/cluster?ref={{version}}"
7 |
8 | configuration = {
9 | # apps environment
10 | apps = {
11 | # The Google cloud project ID to use
12 | project_id = ""
13 |
14 | # Set name_prefix used to generate the cluster_name
15 | # [name_prefix]-[workspace]-[region]
16 | # e.g. name_prefix = kbst becomes: `kbst-apps-europe-west3`
17 | # for small orgs the name works well,
18 | # for bigger orgs consider department or team names
19 | name_prefix = ""
20 |
21 | # Set the base_domain used to generate the FQDN of the cluster
22 | # [cluster_name].[provider_name].[base_domain]
23 | # e.g. kbst-apps-europe-west3.gcp.infra.example.com
24 | base_domain = ""
25 |
26 | # Initial desired K8s version, will be upgraded automatically
27 | cluster_min_master_version = "1.22"
28 |
29 | # Initial number of desired nodes per zone
30 | cluster_initial_node_count = 1
31 |
32 | # The Google cloud region to deploy the clusters in
33 | region = ""
34 |
35 | # Comma-separated list of zone names to deploy worker nodes in.
36 | # Must match region above.
37 | # e.g. cluster_node_locations = "europe-west3-a,europe-west3-b,europe-west3-c"
38 |       # FIXME: use an actual list once the configuration map supports non-string values
39 | cluster_node_locations = ""
40 | }
41 |
42 | # ops environment, inherits from apps
43 | ops = {}
44 |
45 | # loc environment, inherits from apps
46 | loc = {}
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/gke_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "kustomization" {
2 | alias = "gke_zero"
3 | kubeconfig_raw = module.gke_zero.kubeconfig
4 | }
5 |
6 | locals {
7 | gke_zero_kubeconfig = yamldecode(module.gke_zero.kubeconfig)
8 | }
9 |
10 | provider "kubernetes" {
11 | alias = "gke_zero"
12 |
13 | host = local.gke_zero_kubeconfig["clusters"][0]["cluster"]["server"]
14 | cluster_ca_certificate = base64decode(local.gke_zero_kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"])
15 | token = local.gke_zero_kubeconfig["users"][0]["user"]["token"]
16 | }
17 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/gke/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | kustomization = {
4 | source = "kbst/kustomization"
5 | }
6 | }
7 |
8 | required_version = ">= 0.15"
9 | }
10 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/.gitignore:
--------------------------------------------------------------------------------
1 | ../_shared/tpl_gitignore
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/.user:
--------------------------------------------------------------------------------
1 | ../_shared/.user/
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-kind
2 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/README.md:
--------------------------------------------------------------------------------
1 | ../_shared/README.md
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/kind_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "kind_zero" {
2 |   source = "github.com/kbst/terraform-kubestack//kind/cluster?ref={{version}}"
3 | 
4 |   configuration = {
5 |     # Settings for Apps-cluster
6 |     apps = {
7 |       name_prefix = "kind"
8 |       # wildcard DNS resolving *.127.0.0.1.nip.io to 127.0.0.1
9 |       # (nip.io replaces the discontinued xip.io service)
10 |       base_domain = "infra.127.0.0.1.nip.io"
11 | 
12 |       # clusters always have at least one control-plane node
13 |       # uncommenting extra_nodes below will give you a cluster
14 |       # with 3 control-plane nodes and 3 worker nodes
15 |       # extra_nodes = "control-plane,control-plane,worker,worker,worker"
16 |     }
17 | 
18 |     # Settings for Ops-cluster
19 |     ops = {
20 |       # optionally reduce number of ops nodes
21 |       # extra_nodes = "worker"
22 |     }
23 |   }
24 | }
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/kind_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "kustomization" {
2 | alias = "kind_zero"
3 | kubeconfig_raw = module.kind_zero.kubeconfig
4 | }
5 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/kind/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | kustomization = {
4 | source = "kbst/kustomization"
5 | }
6 | }
7 |
8 | required_version = ">= 0.15"
9 | }
10 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/.gitignore:
--------------------------------------------------------------------------------
1 | ../_shared/tpl_gitignore
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/.user:
--------------------------------------------------------------------------------
1 | ../_shared/.user/
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}
2 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/Dockerfile.loc:
--------------------------------------------------------------------------------
1 | FROM {{image_name}}:{{image_tag}}-kind
2 |
3 | ARG UID
4 | ARG GID
5 |
6 | RUN mkdir -p /infra/terraform.tfstate.d &&\
7 | chown ${UID}:${GID} -R /infra
8 |
9 | COPY manifests /infra/manifests
10 | COPY *.tf *.tfvars /infra/
11 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/README.md:
--------------------------------------------------------------------------------
1 | ../_shared/README.md
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/aks_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "aks_zero" {
2 | source = "github.com/kbst/terraform-kubestack//azurerm/cluster?ref={{version}}"
3 |
4 | configuration = {
5 | # Settings for Apps-cluster
6 | apps = {
7 | # Set name_prefix used to generate the cluster_name
8 | # [name_prefix]-[workspace]-[region]
9 | # e.g. name_prefix = kbst becomes: `kbst-apps-eu-west-1`
10 | # for small orgs the name works well
11 | # for bigger orgs consider department or team names
12 | name_prefix = ""
13 |
14 | # Set the base_domain used to generate the FQDN of the cluster
15 | # [cluster_name].[provider_name].[base_domain]
16 | # e.g. kbst-apps-eu-west-1.aws.infra.example.com
17 | base_domain = ""
18 |
19 | # The Azure resource group to use
20 | resource_group = ""
21 | }
22 |
23 | # Settings for Ops-cluster
24 | ops = {}
25 |
26 | loc = {}
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/aks_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | features {}
3 | }
4 |
5 | provider "kustomization" {
6 | alias = "aks_zero"
7 | kubeconfig_raw = module.aks_zero.kubeconfig
8 | }
9 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/eks_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "eks_zero" {
2 | providers = {
3 | aws = aws.eks_zero
4 | kubernetes = kubernetes.eks_zero
5 | }
6 |
7 | source = "github.com/kbst/terraform-kubestack//aws/cluster?ref={{version}}"
8 |
9 | configuration = {
10 | # Settings for Apps-cluster
11 | apps = {
12 | # Set name_prefix used to generate the cluster_name
13 | # [name_prefix]-[workspace]-[region]
14 | # e.g. name_prefix = kbst becomes: `kbst-apps-eu-west-1`
15 | # for small orgs the name works well
16 | # for bigger orgs consider department or team names
17 | name_prefix = ""
18 |
19 | # Set the base_domain used to generate the FQDN of the cluster
20 | # [cluster_name].[provider_name].[base_domain]
21 | # e.g. kbst-apps-eu-west-1.aws.infra.example.com
22 | base_domain = ""
23 |
24 | cluster_instance_type = "t3.small"
25 | cluster_desired_capacity = "1"
26 | cluster_min_size = "1"
27 | cluster_max_size = "3"
28 |
29 | # Comma-separated list of zone names to deploy worker nodes in
30 | # EKS requires a min. of 2 zones
31 | # Must match region set in provider
32 | # e.g. cluster_availability_zones = "eu-west-1a,eu-west-1b,eu-west-1c"
33 |       # FIXME: use an actual list once the configuration map supports non-string values
34 | cluster_availability_zones = ""
35 | }
36 |
37 | # Settings for Ops-cluster
38 | ops = {
39 | # Overwrite apps["cluster_availability_zones"] to have a smaller
40 | # ops cluster
41 | # EKS requires a min. of 2 zones
42 | # e.g. cluster_availability_zones = "eu-west-1a,eu-west-1b"
43 | cluster_availability_zones = ""
44 | }
45 |
46 | loc = {}
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/eks_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | alias = "eks_zero"
3 |
4 | # The AWS provider requires a region. Specify your region here,
5 | # the alias above is used to inject the correct provider into
6 | # the respective cluster module in clusters.tf
7 | region = ""
8 | }
9 |
10 |
11 | provider "kustomization" {
12 | alias = "eks_zero"
13 | kubeconfig_raw = module.eks_zero.kubeconfig
14 | }
15 |
16 | locals {
17 | eks_zero_kubeconfig = yamldecode(module.eks_zero.kubeconfig)
18 | }
19 |
20 | provider "kubernetes" {
21 | alias = "eks_zero"
22 |
23 | host = local.eks_zero_kubeconfig["clusters"][0]["cluster"]["server"]
24 | cluster_ca_certificate = base64decode(local.eks_zero_kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"])
25 | token = local.eks_zero_kubeconfig["users"][0]["user"]["token"]
26 | }
27 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/gke_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "gke_zero" {
2 |   providers = {
3 |     # must reference this cluster's own provider alias, defined in
4 |     # gke_zero_providers.tf (was mistakenly kubernetes.eks_zero)
5 |     kubernetes = kubernetes.gke_zero
6 |   }
7 | 
8 |   source = "github.com/kbst/terraform-kubestack//google/cluster?ref={{version}}"
9 | 
10 |   configuration = {
11 |     # Settings for Apps-cluster
12 |     apps = {
13 |       # The Google cloud project ID to use
14 |       project_id = ""
15 | 
16 |       # Set name_prefix used to generate the cluster_name
17 |       # [name_prefix]-[workspace]-[region]
18 |       # e.g. name_prefix = kbst becomes: `kbst-apps-europe-west3`
19 |       # for small orgs the name works well,
20 |       # for bigger orgs consider department or team names
21 |       name_prefix = ""
22 | 
23 |       # Set the base_domain used to generate the FQDN of the cluster
24 |       # [cluster_name].[provider_name].[base_domain]
25 |       # e.g. kbst-apps-europe-west3.gcp.infra.example.com
26 |       base_domain = ""
27 | 
28 |       # Initial desired K8s version, will be upgraded automatically
29 |       cluster_min_master_version = "1.22"
30 | 
31 |       # Initial number of desired nodes per zone
32 |       cluster_initial_node_count = 1
33 | 
34 |       # The Google cloud region to deploy the clusters in
35 |       region = ""
36 | 
37 |       # Comma-separated list of zone names to deploy worker nodes in.
38 |       # Must match region above.
39 |       # e.g. cluster_node_locations = "europe-west3-a,europe-west3-b,europe-west3-c"
40 |       # FIXME: use an actual list once the configuration map supports non-string values
41 |       cluster_node_locations = ""
42 |     }
43 | 
44 |     # Settings for Ops-cluster
45 |     # configuration here overwrites the values from apps
46 |     ops = {
47 |       # Overwrite apps["cluster_node_locations"] to have a smaller
48 |       # ops cluster
49 |       # e.g. cluster_node_locations = "europe-west3-a"
50 |       cluster_node_locations = ""
51 |     }
52 | 
53 |     loc = {}
54 |   }
55 | }
54 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/gke_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "kustomization" {
2 | alias = "gke_zero"
3 | kubeconfig_raw = module.gke_zero.kubeconfig
4 | }
5 |
6 | locals {
7 | gke_zero_kubeconfig = yamldecode(module.gke_zero.kubeconfig)
8 | }
9 |
10 | provider "kubernetes" {
11 | alias = "gke_zero"
12 |
13 | host = local.gke_zero_kubeconfig["clusters"][0]["cluster"]["server"]
14 | cluster_ca_certificate = base64decode(local.gke_zero_kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"])
15 | token = local.gke_zero_kubeconfig["users"][0]["user"]["token"]
16 | }
17 |
--------------------------------------------------------------------------------
/quickstart/src/configurations/multi-cloud/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | kubernetes = {
4 | source = "hashicorp/kubernetes"
5 | }
6 |
7 | kustomization = {
8 | source = "kbst/kustomization"
9 | }
10 | }
11 |
12 | required_version = ">= 0.15"
13 | }
14 |
--------------------------------------------------------------------------------
/quickstart/src/manifests/.gitempty:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kbst/terraform-kubestack/dad56a5606bb0367ca8fb3808803e037565a2389/quickstart/src/manifests/.gitempty
--------------------------------------------------------------------------------
/tests/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # .user home directory
9 | .user/
10 |
11 | # terraform generated clusters directory
12 | clusters/
13 |
--------------------------------------------------------------------------------
/tests/aks_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "aks_zero" {
2 | source = "../azurerm/cluster"
3 |
4 | configuration = {
5 | # Settings for Apps-cluster
6 | apps = {
7 | resource_group = "terraform-kubestack-testing"
8 | name_prefix = "kbstacctest"
9 | base_domain = "infra.serverwolken.de"
10 |
11 | default_node_pool_vm_size = "Standard_B2s"
12 | default_node_pool_min_count = 1
13 | default_node_pool_max_count = 1
14 |
15 | network_plugin = "azure"
16 |
17 | sku_tier = "Standard"
18 | }
19 |
20 | # Settings for Ops-cluster
21 | ops = {}
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/tests/aks_zero_node_pools.tf:
--------------------------------------------------------------------------------
1 | module "aks_zero_node_pool" {
2 | source = "../azurerm/cluster/node-pool"
3 |
4 | cluster_name = module.aks_zero.current_metadata["name"]
5 | resource_group = module.aks_zero.current_config["resource_group"]
6 |
7 | configuration = {
8 | # Settings for Apps-cluster
9 | apps = {
10 | node_pool_name = "test1"
11 |
12 | vm_size = "Standard_B2s"
13 | min_count = 1
14 | max_count = 1
15 |
16 | node_taints = [
17 | "nvidia.com/gpu=present:NoSchedule"
18 | ]
19 | }
20 |
21 | # Settings for Ops-cluster
22 | ops = {}
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/tests/aks_zero_providers.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | features {}
3 | }
4 |
5 | provider "kustomization" {
6 | alias = "aks_zero"
7 | kubeconfig_raw = module.aks_zero.kubeconfig
8 | }
9 |
--------------------------------------------------------------------------------
/tests/ci-cd:
--------------------------------------------------------------------------------
1 | ../oci
--------------------------------------------------------------------------------
/tests/eks_zero_cluster.tf:
--------------------------------------------------------------------------------
1 | module "eks_zero" {
2 | providers = {
3 | aws = aws.eks_zero
4 | kubernetes = kubernetes.eks_zero
5 | }
6 |
7 | source = "../aws/cluster"
8 |
9 | configuration = {
10 | # Settings for Apps-cluster
11 | apps = {
12 | name_prefix = "kbstacctest"
13 | base_domain = "infra.serverwolken.de"
14 | cluster_instance_type = "t3a.medium"
15 | cluster_desired_capacity = "1"
16 | cluster_min_size = "1"
17 | cluster_max_size = "1"
18 | cluster_availability_zones = "eu-west-1a,eu-west-1b,eu-west-1c"
19 |
20 | cluster_aws_auth_map_users = <