├── examples ├── self_managed │ ├── output.tf │ ├── versions.tf │ └── example.tf ├── eks-auto-mode │ ├── variables.tf │ ├── versions.tf │ ├── deployment.yaml │ ├── outputs.tf │ └── main.tf ├── aws_managed_with_fargate │ ├── output.tf │ ├── versions.tf │ └── example.tf ├── complete │ ├── output.tf │ ├── versions.tf │ └── example.tf ├── aws_managed │ ├── versions.tf │ ├── output.tf │ └── example.tf └── eks-nodegroup-with-existing-cluster │ ├── versions.tf │ ├── output.tf │ └── example.tf ├── .deepsource.toml ├── Makefile ├── .github ├── CODEOWNERS ├── workflows │ ├── tfsec.yml │ ├── tflint.yml │ ├── changelog.yml │ ├── automerge.yml │ ├── auto_assignee.yml │ ├── readme.yml │ └── tf-checks.yml ├── PULL_REQUEST_TEMPLATE.md └── dependbot.yml ├── node_group ├── self_managed │ ├── _userdata.tpl │ ├── outputs.tf │ └── main.tf ├── aws_managed │ ├── versions.tf │ ├── outputs.tf │ ├── variables.tf │ └── main.tf └── fargate_profile │ ├── variables.tf │ └── fargate.tf ├── versions.tf ├── fargate_profile.tf ├── data.tf ├── kubeconfig.tpl ├── LICENSE ├── .gitignore ├── .pre-commit-config.yaml ├── aws-auth-auto-mode.tf ├── kms.tf ├── security_groups.tf ├── outputs.tf ├── aws_auth.tf ├── locals.tf ├── aws_node_groups.tf ├── main.tf ├── README.yaml ├── self_node_groups.tf ├── README.md ├── iam.tf ├── docs └── io.md └── CHANGELOG.md /examples/self_managed/output.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/eks-auto-mode/variables.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/aws_managed_with_fargate/output.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.deepsource.toml: 
-------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "terraform" -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export GENIE_PATH ?= $(shell 'pwd')/../../../genie 2 | include $(GENIE_PATH)/Makefile 3 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in the repo. 2 | * @anmolnagpal @clouddrove/approvers @clouddrove-ci 3 | -------------------------------------------------------------------------------- /node_group/self_managed/_userdata.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /etc/eks/bootstrap.sh --apiserver-endpoint '${cluster_endpoint}' --b64-cluster-ca '${certificate_authority_data}' ${bootstrap_extra_args} '${cluster_name}' 3 | -------------------------------------------------------------------------------- /examples/eks-auto-mode/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.2" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.83" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | } 11 | } 12 | 13 | -------------------------------------------------------------------------------- /examples/complete/output.tf: 
-------------------------------------------------------------------------------- 1 | output "eks_name" { 2 | value = module.eks.cluster_id 3 | } 4 | 5 | output "node_iam_role_name" { 6 | value = module.eks.node_group_iam_role_name 7 | } 8 | 9 | output "tags" { 10 | value = module.eks.tags 11 | } -------------------------------------------------------------------------------- /.github/workflows/tfsec.yml: -------------------------------------------------------------------------------- 1 | name: tfsec 2 | permissions: write-all 3 | on: 4 | pull_request: 5 | workflow_dispatch: 6 | jobs: 7 | tfsec: 8 | uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@master 9 | secrets: inherit 10 | with: 11 | working_directory: '.' 12 | -------------------------------------------------------------------------------- /.github/workflows/tflint.yml: -------------------------------------------------------------------------------- 1 | name: tf-lint 2 | on: 3 | push: 4 | branches: [ master ] 5 | pull_request: 6 | workflow_dispatch: 7 | jobs: 8 | tf-lint: 9 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@master 10 | secrets: 11 | GITHUB: ${{ secrets.GITHUB }} 12 | -------------------------------------------------------------------------------- /examples/self_managed/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.5.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.11.0" 8 | } 9 | cloudinit = { 10 | source = "hashicorp/cloudinit" 11 | version = ">= 2.0" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /node_group/aws_managed/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.5.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.11.0" 8 | 
} 9 | cloudinit = { 10 | source = "hashicorp/cloudinit" 11 | version = ">= 2.0" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /.github/workflows/changelog.yml: -------------------------------------------------------------------------------- 1 | name: changelog 2 | permissions: write-all 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | workflow_dispatch: 8 | jobs: 9 | changelog: 10 | uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@master 11 | secrets: inherit 12 | with: 13 | branch: 'master' 14 | -------------------------------------------------------------------------------- /examples/complete/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /examples/aws_managed/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /.github/workflows/automerge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Auto merge 3 | on: 4 | pull_request: 5 | jobs: 6 | auto-merge: 7 | uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@master 8 | secrets: 9 | GITHUB: ${{ secrets.GITHUB }} 10 | with: 11 | tfcheck: 'tf-checks-aws-managed-example / Check code 
format' 12 | ... 13 | -------------------------------------------------------------------------------- /examples/aws_managed_with_fargate/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /examples/eks-nodegroup-with-existing-cluster/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /examples/eks-nodegroup-with-existing-cluster/output.tf: -------------------------------------------------------------------------------- 1 | output "node_group_role_arn" { 2 | description = "ARN of the IAM role assigned to the EKS managed node group." 3 | value = module.node-group-role.arn 4 | } 5 | 6 | output "node_group_role_name" { 7 | description = "Name of the IAM role assigned to the EKS managed node group." 
8 | value = module.node-group-role.name 9 | } -------------------------------------------------------------------------------- /.github/workflows/auto_assignee.yml: -------------------------------------------------------------------------------- 1 | name: Auto Assign PRs 2 | 3 | on: 4 | pull_request: 5 | types: [opened, reopened] 6 | 7 | workflow_dispatch: 8 | jobs: 9 | assignee: 10 | uses: clouddrove/github-shared-workflows/.github/workflows/auto_assignee.yml@master 11 | secrets: 12 | GITHUB: ${{ secrets.GITHUB }} 13 | with: 14 | assignees: 'clouddrove-ci' 15 | -------------------------------------------------------------------------------- /.github/workflows/readme.yml: -------------------------------------------------------------------------------- 1 | name: Readme Workflow 2 | on: 3 | push: 4 | branches: 5 | - master 6 | paths-ignore: 7 | - 'README.md' 8 | - 'docs/**' 9 | workflow_dispatch: 10 | jobs: 11 | README: 12 | uses: clouddrove/github-shared-workflows/.github/workflows/readme.yml@master 13 | secrets: 14 | TOKEN : ${{ secrets.GITHUB }} 15 | SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} -------------------------------------------------------------------------------- /fargate_profile.tf: -------------------------------------------------------------------------------- 1 | module "fargate" { 2 | source = "./node_group/fargate_profile" 3 | 4 | name = var.name 5 | environment = var.environment 6 | label_order = var.label_order 7 | enabled = var.enabled 8 | fargate_enabled = var.fargate_enabled 9 | cluster_name = try(aws_eks_cluster.default[0].name, var.cluster_name) 10 | fargate_profiles = var.fargate_profiles 11 | subnet_ids = var.subnet_ids 12 | 13 | } -------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | data "aws_region" "current" {} 4 | 
data "aws_eks_cluster" "eks_cluster" { 5 | name = try(aws_eks_cluster.default[0].name, var.cluster_name) 6 | } 7 | data "aws_subnets" "eks" { 8 | count = var.external_cluster ? 1 : 0 9 | filter { 10 | name = var.subnet_filter_name 11 | values = var.subnet_filter_values 12 | } 13 | 14 | filter { 15 | name = "vpc-id" 16 | values = [data.aws_eks_cluster.eks_cluster.vpc_config[0].vpc_id] 17 | } 18 | } -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## what 2 | * Describe high-level what changed as a result of these commits (i.e. in plain-english, what do these changes mean?) 3 | * Use bullet points to be concise and to the point. 4 | 5 | ## why 6 | * Provide the justifications for the changes (e.g. business case). 7 | * Describe why these changes were made (e.g. why do these commits fix the problem?) 8 | * Use bullet points to be concise and to the point. 9 | 10 | ## references 11 | * Link to any supporting jira issues or helpful documentation to add some context (e.g. stackoverflow). 
12 | * Use `closes #123`, if this PR closes a Jira issue `#123` 13 | -------------------------------------------------------------------------------- /kubeconfig.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | preferences: {} 4 | clusters: 5 | - cluster: 6 | server: ${server} 7 | certificate-authority-data: ${certificate_authority_data} 8 | name: ${cluster_name} 9 | contexts: 10 | - context: 11 | cluster: ${cluster_name} 12 | user: ${cluster_name} 13 | name: ${cluster_name} 14 | current-context: ${cluster_name} 15 | users: 16 | - name: ${cluster_name} 17 | user: 18 | exec: 19 | apiVersion: client.authentication.k8s.io/v1alpha1 20 | command: aws-iam-authenticator 21 | args: 22 | - "token" 23 | - "-i" 24 | - "${cluster_name}" -------------------------------------------------------------------------------- /examples/eks-auto-mode/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: inflate 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: inflate 10 | template: 11 | metadata: 12 | labels: 13 | app: inflate 14 | spec: 15 | terminationGracePeriodSeconds: 0 16 | containers: 17 | - name: inflate 18 | image: nginx 19 | resources: 20 | requests: 21 | cpu: 1 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: inflate 27 | spec: 28 | selector: 29 | app: inflate 30 | ports: 31 | - port: 80 32 | targetPort: 80 33 | type: LoadBalancer 34 | -------------------------------------------------------------------------------- /.github/workflows/tf-checks.yml: -------------------------------------------------------------------------------- 1 | name: tf-checks 2 | on: 3 | push: 4 | branches: [ master ] 5 | pull_request: 6 | workflow_dispatch: 7 | jobs: 8 | tf-checks-aws-managed-example: 9 | uses: 
clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master 10 | with: 11 | working_directory: './examples/aws_managed/' 12 | tf-checks-complete-example: 13 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master 14 | with: 15 | working_directory: './examples/complete/' 16 | tf-checks-self-managed-example: 17 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master 18 | with: 19 | working_directory: './examples/self_managed/' -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Cloud Drove 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /examples/aws_managed/output.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Cluster 3 | ################################################################################ 4 | 5 | output "cluster_arn" { 6 | description = "The Amazon Resource Name (ARN) of the cluster" 7 | value = module.eks.cluster_arn 8 | } 9 | 10 | output "cluster_endpoint" { 11 | description = "Endpoint for your Kubernetes API server" 12 | value = module.eks.cluster_endpoint 13 | } 14 | 15 | output "cluster_id" { 16 | description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" 17 | value = module.eks.cluster_id 18 | } 19 | 20 | output "cluster_name" { 21 | description = "The name of the EKS cluster" 22 | value = module.eks.cluster_name 23 | } 24 | 25 | output "cluster_iam_role" { 26 | description = "ARN of cluster IAM role" 27 | value = module.eks.cluster_iam_role_name 28 | } 29 | 30 | output "node_group_iam_role" { 31 | description = "ARN of node group IAM role" 32 | value = module.eks.node_group_iam_role_name 33 | } 34 | -------------------------------------------------------------------------------- /examples/eks-auto-mode/outputs.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Cluster 3 | ################################################################################ 4 | 5 | output "cluster_arn" { 6 | description = "The Amazon Resource Name (ARN) of the cluster" 7 | value = module.eks.cluster_arn 8 | } 9 | 10 | output "cluster_endpoint" { 11 | description = "Endpoint for your Kubernetes API server" 12 | value = module.eks.cluster_endpoint 13 | } 14 | 15 | output "cluster_id" { 16 | description = 
"The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" 17 | value = module.eks.cluster_id 18 | } 19 | 20 | output "cluster_name" { 21 | description = "The name of the EKS cluster" 22 | value = module.eks.cluster_name 23 | } 24 | 25 | output "cluster_iam_role" { 26 | description = "ARN of cluster IAM role" 27 | value = module.eks.cluster_iam_role_name 28 | } 29 | 30 | output "node_group_iam_role" { 31 | description = "ARN of node group IAM role" 32 | value = module.eks.node_group_iam_role_name 33 | } 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | eks-admin-cluster-role-binding.yaml 2 | eks-admin-service-account.yaml 3 | config-map-aws-auth*.yaml 4 | kubeconfig_* 5 | .idea 6 | 7 | ################################################################# 8 | # Default .gitignore content for all terraform-aws-modules below 9 | ################################################################# 10 | 11 | .DS_Store 12 | 13 | # Local .terraform directories 14 | **/.terraform/* 15 | 16 | # Terraform lockfile 17 | .terraform.lock.hcl 18 | 19 | # .tfstate files 20 | *.tfstate 21 | *.tfstate.* 22 | *.tfplan 23 | 24 | # Crash log files 25 | crash.log 26 | 27 | # Exclude all .tfvars files, which are likely to contain sentitive data, such as 28 | # password, private keys, and other secrets. These should not be part of version 29 | # control as they are data points which are potentially sensitive and subject 30 | # to change depending on the environment. 
31 | *.tfvars 32 | 33 | # Ignore override files as they are usually used to override resources locally and so 34 | # are not checked in 35 | override.tf 36 | override.tf.json 37 | *_override.tf 38 | *_override.tf.json 39 | 40 | # Ignore CLI configuration files 41 | .terraformrc 42 | terraform.rc 43 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.58.0 4 | hooks: 5 | - id: terraform_fmt 6 | # - id: terraform_validate 7 | - id: terraform_docs 8 | args: 9 | - '--args=--lockfile=false' 10 | - id: terraform_tflint 11 | args: 12 | - '--args=--only=terraform_deprecated_interpolation' 13 | - '--args=--only=terraform_deprecated_index' 14 | - '--args=--only=terraform_unused_declarations' 15 | - '--args=--only=terraform_comment_syntax' 16 | - '--args=--only=terraform_documented_outputs' 17 | - '--args=--only=terraform_documented_variables' 18 | - '--args=--only=terraform_typed_variables' 19 | - '--args=--only=terraform_module_pinned_source' 20 | # - '--args=--only=terraform_naming_convention' 21 | - '--args=--only=terraform_required_version' 22 | - '--args=--only=terraform_required_providers' 23 | - '--args=--only=terraform_standard_module_structure' 24 | - '--args=--only=terraform_workspace_remote' 25 | - repo: https://github.com/pre-commit/pre-commit-hooks 26 | rev: v4.0.1 27 | hooks: 28 | - id: check-merge-conflict 29 | -------------------------------------------------------------------------------- /aws-auth-auto-mode.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Access Entry 3 | ################################################################################ 4 | 5 | data "aws_iam_session_context" "current" { 6 | 7 | # 
This data source provides information on the IAM source role of an STS assumed role 8 | # For non-role ARNs, this data source simply passes the ARN through issuer ARN 9 | arn = try(data.aws_caller_identity.current.arn, "") 10 | } 11 | 12 | 13 | resource "aws_eks_access_entry" "this" { 14 | for_each = { for k, v in local.merged_access_entries : k => v if var.create } 15 | 16 | cluster_name = aws_eks_cluster.default[0].id 17 | kubernetes_groups = try(each.value.kubernetes_groups, null) 18 | principal_arn = each.value.principal_arn 19 | type = try(each.value.type, "STANDARD") 20 | user_name = try(each.value.user_name, null) 21 | 22 | tags = merge(var.tags, try(each.value.tags, {})) 23 | } 24 | 25 | resource "aws_eks_access_policy_association" "this" { 26 | for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if var.create } 27 | 28 | access_scope { 29 | namespaces = try(each.value.association_access_scope_namespaces, []) 30 | type = each.value.association_access_scope_type 31 | } 32 | 33 | cluster_name = aws_eks_cluster.default[0].id 34 | 35 | policy_arn = each.value.association_policy_arn 36 | principal_arn = each.value.principal_arn 37 | 38 | depends_on = [ 39 | aws_eks_access_entry.this, 40 | ] 41 | } 42 | -------------------------------------------------------------------------------- /node_group/aws_managed/outputs.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Launch template 3 | ################################################################################ 4 | 5 | output "launch_template_id" { 6 | description = "The ID of the launch template" 7 | value = try(aws_launch_template.this[0].id, "") 8 | } 9 | 10 | output "launch_template_arn" { 11 | description = "The ARN of the launch template" 12 | value = try(aws_launch_template.this[0].arn, "") 13 | } 14 | 15 | output 
"launch_template_latest_version" { 16 | description = "The latest version of the launch template" 17 | value = try(aws_launch_template.this[0].latest_version, "") 18 | } 19 | 20 | ################################################################################ 21 | # Node Group 22 | ################################################################################ 23 | 24 | output "node_group_arn" { 25 | description = "Amazon Resource Name (ARN) of the EKS Node Group" 26 | value = try(aws_eks_node_group.this[0].arn, "") 27 | } 28 | 29 | output "node_group_id" { 30 | description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)" 31 | value = try(aws_eks_node_group.this[0].id, "") 32 | } 33 | 34 | output "node_group_resources" { 35 | description = "List of objects containing information about underlying resources" 36 | value = try(aws_eks_node_group.this[0].resources, "") 37 | } 38 | 39 | output "node_group_status" { 40 | description = "Status of the EKS Node Group" 41 | value = try(aws_eks_node_group.this[0].arn, "") 42 | } 43 | -------------------------------------------------------------------------------- /.github/dependbot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | version: 2 6 | updates: 7 | - package-ecosystem: "terraform" # See documentation for possible values 8 | directory: "/" # Location of package manifests 9 | schedule: 10 | interval: "weekly" 11 | # Add assignees 12 | assignees: 13 | - "clouddrove-ci" 14 | # Add reviewer 15 | reviewers: 16 | - "approvers" 17 | - package-ecosystem: "terraform" # See documentation for possible values 18 | directory: "examples/aws_managed" # Location of package manifests 19 | schedule: 20 | interval: "weekly" 21 | # Add assignees 22 | assignees: 23 | - "clouddrove-ci" 24 | # Add reviewer 25 | reviewers: 26 | - "approvers" 27 | - package-ecosystem: "terraform" # See documentation for possible values 28 | directory: "examples/complete" # Location of package manifests 29 | schedule: 30 | interval: "weekly" 31 | # Add assignees 32 | assignees: 33 | - "clouddrove-ci" 34 | # Add reviewer 35 | reviewers: 36 | - "approvers" 37 | - package-ecosystem: "terraform" # See documentation for possible values 38 | directory: "examples/self_managed" # Location of package manifests 39 | schedule: 40 | interval: "weekly" 41 | # Add assignees 42 | assignees: 43 | - "clouddrove-ci" 44 | # Add reviewer 45 | reviewers: 46 | - "approvers" 47 | -------------------------------------------------------------------------------- /node_group/fargate_profile/variables.tf: -------------------------------------------------------------------------------- 1 | #Module : LABEL 2 | #Description : Terraform label module variables. 3 | variable "name" { 4 | type = string 5 | default = "" 6 | description = "Name (e.g. `app` or `cluster`)." 7 | } 8 | 9 | variable "environment" { 10 | type = string 11 | default = "" 12 | description = "Environment (e.g. `prod`, `dev`, `staging`)." 
13 | } 14 | 15 | variable "label_order" { 16 | type = list(any) 17 | default = [] 18 | description = "Label order, e.g. `name`,`application`." 19 | } 20 | 21 | variable "attributes" { 22 | type = list(any) 23 | default = [] 24 | description = "Additional attributes (e.g. `1`)." 25 | } 26 | 27 | variable "tags" { 28 | type = map(any) 29 | default = {} 30 | description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 31 | } 32 | 33 | variable "managedby" { 34 | type = string 35 | default = "hello@clouddorve.com" 36 | description = "ManagedBy, eg 'pps'." 37 | } 38 | 39 | variable "delimiter" { 40 | type = string 41 | default = "-" 42 | description = "Delimiter to be used between `organization`, `environment`, `name` and `attributes`." 43 | } 44 | 45 | variable "enabled" { 46 | type = bool 47 | default = true 48 | description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." 49 | } 50 | 51 | variable "fargate_enabled" { 52 | type = bool 53 | default = false 54 | description = "Whether fargate profile is enabled or not" 55 | } 56 | 57 | variable "fargate_profiles" { 58 | type = map(any) 59 | default = {} 60 | description = "The number of Fargate Profiles that would be created." 61 | } 62 | 63 | variable "cluster_name" { 64 | type = string 65 | default = "" 66 | description = "The name of the EKS cluster." 67 | } 68 | 69 | variable "subnet_ids" { 70 | type = list(string) 71 | description = "A list of subnet IDs to launch resources in." 
72 | } -------------------------------------------------------------------------------- /kms.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "cloudwatch" { 2 | policy_id = "key-policy-cloudwatch" 3 | statement { 4 | sid = "Enable IAM User Permissions" 5 | actions = [ 6 | "kms:*", 7 | ] 8 | effect = "Allow" 9 | principals { 10 | type = "AWS" 11 | identifiers = [ 12 | format( 13 | "arn:%s:iam::%s:root", 14 | data.aws_partition.current.partition, 15 | data.aws_caller_identity.current.account_id 16 | ) 17 | ] 18 | } 19 | resources = ["*"] 20 | } 21 | statement { 22 | sid = "AllowCloudWatchLogs" 23 | actions = [ 24 | "kms:Encrypt*", 25 | "kms:Decrypt*", 26 | "kms:ReEncrypt*", 27 | "kms:GenerateDataKey*", 28 | "kms:Describe*" 29 | ] 30 | effect = "Allow" 31 | principals { 32 | type = "Service" 33 | identifiers = [ 34 | format( 35 | "logs.%s.amazonaws.com", 36 | data.aws_region.current.name 37 | ) 38 | ] 39 | } 40 | resources = ["*"] 41 | } 42 | } 43 | 44 | resource "aws_kms_key" "cluster" { 45 | count = var.enabled && var.cluster_encryption_config_enabled && var.external_cluster == false ? 1 : 0 46 | description = "EKS Cluster ${module.labels.id} Encryption Config KMS Key" 47 | enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation 48 | deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days 49 | policy = var.cluster_encryption_config_kms_key_policy 50 | tags = module.labels.tags 51 | } 52 | 53 | resource "aws_kms_key" "cloudwatch_log" { 54 | count = var.enabled && var.cluster_encryption_config_enabled && var.external_cluster == false ? 
1 : 0 55 | description = "CloudWatch log group ${module.labels.id} Encryption Config KMS Key" 56 | enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation 57 | deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days 58 | policy = data.aws_iam_policy_document.cloudwatch.json 59 | tags = module.labels.tags 60 | } -------------------------------------------------------------------------------- /node_group/fargate_profile/fargate.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 3.1.15" 6 | } 7 | } 8 | } 9 | 10 | #Module : label 11 | #Description : Terraform module to create consistent naming for multiple names. 12 | module "labels" { 13 | source = "clouddrove/labels/aws" 14 | version = "1.3.0" 15 | 16 | name = var.name 17 | environment = var.environment 18 | managedby = var.managedby 19 | delimiter = var.delimiter 20 | attributes = compact(concat(var.attributes, ["fargate"])) 21 | label_order = var.label_order 22 | } 23 | 24 | 25 | #Module : IAM ROLE 26 | #Description : Provides an IAM role. 27 | resource "aws_iam_role" "fargate_role" { 28 | count = var.enabled && var.fargate_enabled ? 1 : 0 29 | 30 | name = format("%s-fargate-role", module.labels.id) 31 | assume_role_policy = data.aws_iam_policy_document.aws_eks_fargate_policy[0].json 32 | tags = module.labels.tags 33 | } 34 | 35 | resource "aws_iam_role_policy_attachment" "amazon_eks_fargate_pod_execution_role_policy" { 36 | count = var.enabled && var.fargate_enabled ? 1 : 0 37 | 38 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy" 39 | role = aws_iam_role.fargate_role[0].name 40 | } 41 | 42 | #Module : EKS Fargate 43 | #Description : Enabling fargate for AWS EKS 44 | resource "aws_eks_fargate_profile" "default" { 45 | for_each = var.enabled && var.fargate_enabled ?
var.fargate_profiles : {} 46 | 47 | cluster_name = var.cluster_name 48 | fargate_profile_name = format("%s-%s", module.labels.id, each.value.addon_name) 49 | pod_execution_role_arn = aws_iam_role.fargate_role[0].arn 50 | subnet_ids = var.subnet_ids 51 | tags = module.labels.tags 52 | 53 | selector { 54 | namespace = lookup(each.value, "namespace", "default") 55 | labels = lookup(each.value, "labels", null) 56 | } 57 | } 58 | 59 | # AWS EKS Fargate policy 60 | data "aws_iam_policy_document" "aws_eks_fargate_policy" { 61 | count = var.enabled && var.fargate_enabled ? 1 : 0 62 | 63 | statement { 64 | effect = "Allow" 65 | actions = ["sts:AssumeRole"] 66 | 67 | principals { 68 | type = "Service" 69 | identifiers = ["eks-fargate-pods.amazonaws.com"] 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /security_groups.tf: -------------------------------------------------------------------------------- 1 | #Module : SECURITY GROUP 2 | #Description : Provides a security group resource. 3 | 4 | resource "aws_security_group" "node_group" { 5 | count = var.enabled && var.external_cluster == false ? 1 : 0 6 | name = "${module.labels.id}-node-group" 7 | description = "Security Group for nodes Groups" 8 | vpc_id = var.vpc_id 9 | tags = module.labels.tags 10 | } 11 | 12 | #Module : SECURITY GROUP RULE EGRESS 13 | #Description : Provides a security group rule resource. Represents a single egress group rule, 14 | # which can be added to external Security Groups. 15 | 16 | #tfsec:ignore:aws-ec2-no-public-egress-sgr ## To allow all outbound traffic from eks nodes. 17 | resource "aws_security_group_rule" "node_group" { 18 | count = var.enabled && var.external_cluster == false ? 
1 : 0 19 | description = "Allow all egress traffic" 20 | from_port = 0 21 | to_port = 0 22 | protocol = "-1" 23 | cidr_blocks = ["0.0.0.0/0"] 24 | security_group_id = aws_security_group.node_group[0].id 25 | type = "egress" 26 | } 27 | 28 | #Module : SECURITY GROUP RULE INGRESS SELF 29 | #Description : Provides a security group rule resource. Represents a single ingress group rule, 30 | # which can be added to external Security Groups. 31 | resource "aws_security_group_rule" "ingress_self" { 32 | count = var.enabled && var.external_cluster == false ? 1 : 0 33 | description = "Allow nodes to communicate with each other" 34 | from_port = 0 35 | to_port = 65535 36 | protocol = "-1" 37 | security_group_id = aws_security_group.node_group[0].id 38 | source_security_group_id = aws_security_group.node_group[0].id 39 | type = "ingress" 40 | } 41 | 42 | #Module : SECURITY GROUP 43 | #Description : Provides a security group rule resource. Represents a single ingress group rule, 44 | # which can be added to external Security Groups. 45 | resource "aws_security_group_rule" "ingress_security_groups_node_group" { 46 | count = var.enabled && var.external_cluster == false ? length(var.allowed_security_groups) : 0 47 | description = "Allow inbound traffic from existing Security Groups" 48 | from_port = 0 49 | to_port = 65535 50 | protocol = "-1" 51 | source_security_group_id = element(var.allowed_security_groups, count.index) 52 | security_group_id = aws_security_group.node_group[0].id 53 | type = "ingress" 54 | } 55 | 56 | #Module : SECURITY GROUP RULE CIDR BLOCK 57 | #Description : Provides a security group rule resource. Represents a single ingress group rule, 58 | # which can be added to external Security Groups. 59 | resource "aws_security_group_rule" "ingress_cidr_blocks_node_group" { 60 | count = var.enabled && var.external_cluster == false && length(var.allowed_cidr_blocks) > 0 ? 
1 : 0 61 | description = "Allow inbound traffic from CIDR blocks" 62 | from_port = 0 63 | to_port = 0 64 | protocol = "-1" 65 | cidr_blocks = var.allowed_cidr_blocks 66 | security_group_id = aws_security_group.node_group[0].id 67 | type = "ingress" 68 | } -------------------------------------------------------------------------------- /node_group/self_managed/outputs.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Launch template 3 | ################################################################################ 4 | 5 | output "launch_template_id" { 6 | description = "The ID of the launch template" 7 | value = try(aws_launch_template.this[0].id, "") 8 | } 9 | 10 | output "launch_template_arn" { 11 | description = "The ARN of the launch template" 12 | value = try(aws_launch_template.this[0].arn, "") 13 | } 14 | 15 | output "launch_template_latest_version" { 16 | description = "The latest version of the launch template" 17 | value = try(aws_launch_template.this[0].latest_version, "") 18 | } 19 | 20 | ################################################################################ 21 | # autoscaling group 22 | ################################################################################ 23 | 24 | output "autoscaling_group_name" { 25 | description = "The autoscaling group name" 26 | value = try(aws_autoscaling_group.this[0].name, "") 27 | } 28 | 29 | output "autoscaling_group_arn" { 30 | description = "The ARN for this autoscaling group" 31 | value = try(aws_autoscaling_group.this[0].arn, "") 32 | } 33 | 34 | output "autoscaling_group_id" { 35 | description = "The autoscaling group id" 36 | value = try(aws_autoscaling_group.this[0].id, "") 37 | } 38 | 39 | output "autoscaling_group_min_size" { 40 | description = "The minimum size of the autoscaling group" 41 | value = try(aws_autoscaling_group.this[0].min_size, "") 42 | } 43 | 44 | output 
"autoscaling_group_max_size" { 45 | description = "The maximum size of the autoscaling group" 46 | value = try(aws_autoscaling_group.this[0].max_size, "") 47 | } 48 | 49 | output "autoscaling_group_desired_capacity" { 50 | description = "The number of Amazon EC2 instances that should be running in the group" 51 | value = try(aws_autoscaling_group.this[0].desired_capacity, "") 52 | } 53 | 54 | output "autoscaling_group_default_cooldown" { 55 | description = "Time between a scaling activity and the succeeding scaling activity" 56 | value = try(aws_autoscaling_group.this[0].default_cooldown, "") 57 | } 58 | 59 | output "autoscaling_group_health_check_grace_period" { 60 | description = "Time after instance comes into service before checking health" 61 | value = try(aws_autoscaling_group.this[0].health_check_grace_period, "") 62 | } 63 | 64 | output "autoscaling_group_health_check_type" { 65 | description = "EC2 or ELB. Controls how health checking is done" 66 | value = try(aws_autoscaling_group.this[0].health_check_type, "") 67 | } 68 | 69 | output "autoscaling_group_availability_zones" { 70 | description = "The availability zones of the autoscaling group" 71 | value = try(aws_autoscaling_group.this[0].availability_zones, "") 72 | } 73 | 74 | output "autoscaling_group_vpc_zone_identifier" { 75 | description = "The VPC zone identifier" 76 | value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "") 77 | } 78 | 79 | ################################################################################ 80 | # autoscaling group schedule 81 | ################################################################################ 82 | 83 | output "autoscaling_group_schedule_arns" { 84 | description = "ARNs of autoscaling group schedules" 85 | value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } 86 | } 87 | -------------------------------------------------------------------------------- /outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "cluster_arn" { 2 | value = try(aws_eks_cluster.default[0].arn, "") 3 | description = "The Amazon Resource Name (ARN) of the cluster" 4 | } 5 | 6 | output "cluster_certificate_authority_data" { 7 | value = try(aws_eks_cluster.default[0].certificate_authority[0].data, "") 8 | description = "Base64 encoded certificate data required to communicate with the cluster" 9 | } 10 | 11 | output "cluster_endpoint" { 12 | value = try(aws_eks_cluster.default[0].endpoint, "") 13 | description = "Endpoint for your Kubernetes API server" 14 | } 15 | 16 | output "cluster_id" { 17 | value = try(aws_eks_cluster.default[0].id, "") 18 | description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" 19 | } 20 | 21 | output "cluster_oidc_issuer_url" { 22 | value = try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, "") 23 | description = "The URL on the EKS cluster for the OpenID Connect identity provider" 24 | } 25 | 26 | output "cluster_platform_version" { 27 | value = try(aws_eks_cluster.default[0].platform_version, "") 28 | description = "Platform version for the cluster" 29 | } 30 | 31 | output "cluster_status" { 32 | value = try(aws_eks_cluster.default[0].status, "") 33 | description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" 34 | } 35 | 36 | output "cluster_primary_security_group_id" { 37 | value = try(aws_eks_cluster.default[0].vpc_config[0].cluster_security_group_id, "") 38 | description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use default security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console" 39 | } 40 | 41 | output "node_security_group_arn" { 42 | description = "Amazon Resource Name (ARN) of the node shared security group" 43 | value = try(aws_security_group.node_group[0].arn, "") 44 | } 45 | 46 | output "node_security_group_id" { 47 | value = try(aws_security_group.node_group[0].id, "") 48 | description = "ID of the node shared security group" 49 | } 50 | 51 | output "oidc_provider_arn" { 52 | value = try(aws_iam_openid_connect_provider.default[0].arn, "") 53 | description = "The ARN of the OIDC Provider if `enable_irsa = true`" 54 | } 55 | 56 | output "cluster_iam_role_name" { 57 | value = try(aws_iam_role.default[0].name, "") 58 | description = "IAM role name of the EKS cluster" 59 | } 60 | 61 | output "cluster_iam_role_arn" { 62 | value = try(aws_iam_role.default[0].arn, "") 63 | description = "IAM role ARN of the EKS cluster" 64 | } 65 | 66 | output "cluster_iam_role_unique_id" { 67 | value = try(aws_iam_role.default[0].unique_id, "") 68 | description = "Stable and unique string identifying the IAM role" 69 | } 70 | 71 | output "node_group_iam_role_name" { 72 | value = try(aws_iam_role.node_groups[0].name, "") 73 | description = "IAM role name of the EKS cluster" 74 | } 75 | 76 | output "node_group_iam_role_arn" { 77 | value = try(aws_iam_role.node_groups[0].arn, "") 78 | description = "IAM role ARN of the EKS cluster" 79 | } 80 | 81 | output "node_group_iam_role_unique_id" { 82 | value = try(aws_iam_role.node_groups[0].unique_id, "") 83 | description = "Stable and unique string identifying the IAM role" 84 | } 85 | 86 | output "tags" { 87 | value = module.labels.tags 88 | } 89 | 90 | output "cluster_name" { 91 | value = module.labels.id 92 | } -------------------------------------------------------------------------------- /aws_auth.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | # The EKS service does not provide a cluster-level API 
parameter or resource to automatically configure the underlying Kubernetes cluster 4 | # to allow worker nodes to join the cluster via AWS IAM role authentication. 5 | 6 | # NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster), 7 | # the requirements outlined here must be met: 8 | # https://learn.hashicorp.com/terraform/aws/eks-intro#preparation 9 | # https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks 10 | # https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes 11 | 12 | # Additional links 13 | # https://learn.hashicorp.com/terraform/aws/eks-intro 14 | # https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b 15 | # https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html 16 | # https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html 17 | # https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html 18 | # https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html 19 | # https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb 20 | # http://marcinkaszynski.com/2018/07/12/eks-auth.html 21 | # https://cloud.google.com/kubernetes-engine/docs/concepts/configmap 22 | # http://yaml-multiline.info 23 | # https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216 24 | # https://www.terraform.io/docs/cloud/run/install-software.html 25 | # https://stackoverflow.com/questions/26123740/is-it-possible-to-install-aws-cli-package-without-root-permission 26 | # https://stackoverflow.com/questions/58232731/kubectl-missing-form-terraform-cloud 27 | # https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html 28 | # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html 29 | 30 | resource "null_resource" "wait_for_cluster" { 31 | count = var.enabled && var.external_cluster == false 
&& var.apply_config_map_aws_auth ? 1 : 0 32 | depends_on = [aws_eks_cluster.default[0]] 33 | 34 | provisioner "local-exec" { 35 | command = var.wait_for_cluster_command 36 | interpreter = var.local_exec_interpreter 37 | environment = { 38 | ENDPOINT = aws_eks_cluster.default[0].endpoint 39 | } 40 | } 41 | } 42 | 43 | data "aws_eks_cluster" "eks" { 44 | count = var.enabled && var.external_cluster == false && var.apply_config_map_aws_auth ? 1 : 0 45 | name = aws_eks_cluster.default[0].id 46 | } 47 | 48 | # Get an authentication token to communicate with the EKS cluster. 49 | # By default (before other roles are added to the Auth ConfigMap), you can authenticate to EKS cluster only by assuming the role that created the cluster. 50 | # `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token. 51 | # If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token. 52 | # https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html 53 | data "aws_eks_cluster_auth" "eks" { 54 | count = var.enabled && var.external_cluster == false && var.apply_config_map_aws_auth ? 1 : 0 55 | name = aws_eks_cluster.default[0].id 56 | } 57 | 58 | provider "kubernetes" { 59 | token = var.apply_config_map_aws_auth && var.external_cluster == false ? data.aws_eks_cluster_auth.eks[0].token : "" 60 | host = var.apply_config_map_aws_auth && var.external_cluster == false ? data.aws_eks_cluster.eks[0].endpoint : "" 61 | cluster_ca_certificate = var.apply_config_map_aws_auth && var.external_cluster == false ? base64decode(data.aws_eks_cluster.eks[0].certificate_authority[0].data) : "" 62 | } 63 | 64 | resource "kubernetes_config_map" "aws_auth_ignore_changes" { 65 | count = var.enabled && var.external_cluster == false && var.apply_config_map_aws_auth ? 
1 : 0 66 | depends_on = [null_resource.wait_for_cluster[0]] 67 | 68 | metadata { 69 | name = "aws-auth" 70 | namespace = "kube-system" 71 | } 72 | 73 | data = { 74 | mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))) 75 | mapUsers = yamlencode(var.map_additional_iam_users) 76 | mapAccounts = yamlencode(var.map_additional_aws_accounts) 77 | } 78 | 79 | lifecycle { 80 | ignore_changes = [data["mapRoles"]] 81 | } 82 | } -------------------------------------------------------------------------------- /examples/eks-nodegroup-with-existing-cluster/example.tf: -------------------------------------------------------------------------------- 1 | ##-------------------------------------------------------------------- 2 | ## LOCALS 3 | ##-------------------------------------------------------------------- 4 | locals { 5 | eks_cluster_name = "clouddrove-eks" 6 | region = "us-east-1" 7 | environment = "test" 8 | } 9 | 10 | ##-------------------------------------------------------------------- 11 | ## PROVIDERS 12 | ##-------------------------------------------------------------------- 13 | provider "aws" { 14 | region = local.region 15 | } 16 | 17 | # Kubernetes provider using the fetched cluster's details 18 | provider "kubernetes" { 19 | host = data.aws_eks_cluster.this.endpoint 20 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 21 | token = data.aws_eks_cluster_auth.this.token 22 | } 23 | 24 | ##-------------------------------------------------------------------- 25 | ## DATA SOURCES 26 | ##-------------------------------------------------------------------- 27 | 28 | # Fetch existing EKS cluster 29 | data "aws_eks_cluster" "this" { 30 | name = local.eks_cluster_name 31 | } 32 | 33 | # Fetch authentication token for the EKS cluster 34 | data "aws_eks_cluster_auth" "this" { 35 | name = data.aws_eks_cluster.this.name 36 | } 37 | 38 | data "aws_iam_policy_document" "default" { 39 | 
statement { 40 | effect = "Allow" 41 | actions = ["sts:AssumeRole"] 42 | principals { 43 | type = "Service" 44 | identifiers = ["ec2.amazonaws.com"] 45 | } 46 | } 47 | } 48 | 49 | data "aws_iam_policy_document" "amazon_eks_node_group_autoscaler_policy" { 50 | statement { 51 | actions = [ 52 | "autoscaling:DescribeAutoScalingGroups", 53 | "autoscaling:DescribeAutoScalingInstances", 54 | "autoscaling:DescribeLaunchConfigurations", 55 | "autoscaling:DescribeTags", 56 | "autoscaling:SetDesiredCapacity", 57 | "autoscaling:TerminateInstanceInAutoScalingGroup", 58 | "autoscaling:TerminateInstanceInAutoScalingGroup", 59 | "ec2:DescribeLaunchTemplateVersions", 60 | "ecr:*" 61 | ] 62 | effect = "Allow" 63 | resources = ["*"] 64 | } 65 | } 66 | 67 | 68 | 69 | ##-------------------------------------------------------------------- 70 | ## MODULE CALL 71 | ##-------------------------------------------------------------------- 72 | 73 | module "node-group-role" { 74 | source = "clouddrove/iam-role/aws" 75 | version = "1.3.2" 76 | 77 | name = "${local.eks_cluster_name}-node-group" 78 | environment = local.environment 79 | 80 | # Allow EC2 to assume role 81 | assume_role_policy = data.aws_iam_policy_document.default.json 82 | policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy.json 83 | 84 | # Attach managed policies required for EKS worker nodes 85 | policy_enabled = true 86 | 87 | managed_policy_arns = [ 88 | "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", 89 | "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", 90 | "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", 91 | "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly" 92 | ] 93 | } 94 | 95 | 96 | # If your base module is disabled 97 | module "eks" { 98 | source = "../../" 99 | cluster_name = local.eks_cluster_name 100 | enabled = true 101 | external_cluster = true 102 | subnet_filter_name = "tag:kubernetes.io/cluster/${local.eks_cluster_name}" 103 | subnet_filter_values = 
["owned", "shared"] 104 | region = local.region 105 | node_role_arn = module.node-group-role.arn 106 | subnet_ids = data.aws_eks_cluster.this.vpc_config[0].subnet_ids 107 | 108 | managed_node_group_defaults = { 109 | subnet_ids = data.aws_eks_cluster.this.vpc_config[0].subnet_ids 110 | nodes_additional_security_group_ids = [""] # Replace with your actual security group IDs if needed 111 | tags = { 112 | "kubernetes.io/cluster/${local.eks_cluster_name}" = "shared" 113 | "k8s.io/cluster/${local.eks_cluster_name}" = "shared" 114 | } 115 | block_device_mappings = { 116 | xvda = { 117 | device_name = "/dev/xvda" 118 | ebs = { 119 | volume_size = 50 120 | volume_type = "gp3" 121 | iops = 3000 122 | throughput = 150 123 | encrypted = false 124 | } 125 | } 126 | } 127 | } 128 | 129 | managed_node_group = { 130 | additional = { 131 | name = "additional" 132 | capacity_type = "SPOT" 133 | min_size = 1 134 | max_size = 2 135 | desired_size = 1 136 | force_update_version = true 137 | instance_types = ["t3.medium"] 138 | ami_type = "BOTTLEROCKET_x86_64" 139 | } 140 | } 141 | } -------------------------------------------------------------------------------- /locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | aws_caller_identity_account_id = data.aws_caller_identity.current.account_id 3 | aws_caller_identity_arn = data.aws_caller_identity.current.arn 4 | eks_oidc_provider_arn = replace(data.aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer, "https://", "") 5 | eks_oidc_issuer_url = data.aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer 6 | eks_cluster_id = data.aws_eks_cluster.eks_cluster.id 7 | aws_eks_cluster_endpoint = data.aws_eks_cluster.eks_cluster.endpoint 8 | # Encryption 9 | cluster_encryption_config = { 10 | resources = var.cluster_encryption_config_resources 11 | provider_key_arn = var.enabled && var.external_cluster == false ? 
aws_kms_key.cluster[0].arn : null 12 | } 13 | aws_policy_prefix = format("arn:%s:iam::aws:policy", data.aws_partition.current.partition) 14 | create_outposts_local_cluster = length(var.outpost_config) > 0 15 | auto_mode_enabled = try(var.cluster_compute_config.enabled, false) 16 | 17 | # EKS auto node group locals 18 | create_iam_role = var.enabled 19 | create_node_iam_role = var.enabled && var.create_node_iam_role && local.auto_mode_enabled 20 | node_iam_role_name = coalesce(var.node_iam_role_name, "${var.name}-eks-auto") 21 | 22 | # Standard EKS cluster 23 | eks_standard_iam_role_policies = { for k, v in { 24 | AmazonEKSClusterPolicy = "${local.aws_policy_prefix}/AmazonEKSClusterPolicy", 25 | } : k => v if !local.create_outposts_local_cluster && !local.auto_mode_enabled } 26 | 27 | # EKS cluster with EKS auto mode enabled 28 | eks_auto_mode_iam_role_policies = { for k, v in { 29 | AmazonEKSClusterPolicy = "${local.aws_policy_prefix}/AmazonEKSClusterPolicy" 30 | AmazonEKSComputePolicy = "${local.aws_policy_prefix}/AmazonEKSComputePolicy" 31 | AmazonEKSBlockStoragePolicy = "${local.aws_policy_prefix}/AmazonEKSBlockStoragePolicy" 32 | AmazonEKSLoadBalancingPolicy = "${local.aws_policy_prefix}/AmazonEKSLoadBalancingPolicy" 33 | AmazonEKSNetworkingPolicy = "${local.aws_policy_prefix}/AmazonEKSNetworkingPolicy" 34 | } : k => v if !local.create_outposts_local_cluster && local.auto_mode_enabled } 35 | 36 | # EKS local cluster on Outposts 37 | eks_outpost_iam_role_policies = { for k, v in { 38 | AmazonEKSClusterPolicy = "${local.aws_policy_prefix}/AmazonEKSLocalOutpostClusterPolicy" 39 | } : k => v if local.create_outposts_local_cluster && !local.auto_mode_enabled } 40 | 41 | #aws_auth locals 42 | certificate_authority_data_list = coalescelist(aws_eks_cluster.default[*].certificate_authority, [[{ data : "" }]]) 43 | certificate_authority_data_list_internal = local.certificate_authority_data_list[0] 44 | certificate_authority_data_map = 
local.certificate_authority_data_list_internal[0] 45 | certificate_authority_data = local.certificate_authority_data_map["data"] 46 | 47 | # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap 48 | # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically 49 | map_worker_roles = [ 50 | { 51 | rolearn : try(aws_iam_role.node_groups[0].arn, var.node_role_arn) 52 | username : "system:node:{{EC2PrivateDNSName}}" 53 | groups : [ 54 | "system:bootstrappers", 55 | "system:nodes" 56 | ] 57 | } 58 | ] 59 | 60 | # access entry locals 61 | # This replaces the one time logic from the EKS API with something that can be 62 | # better controlled by users through Terraform 63 | bootstrap_cluster_creator_admin_permissions = { 64 | cluster_creator = { 65 | principal_arn = try(data.aws_iam_session_context.current.issuer_arn, "") 66 | type = "STANDARD" 67 | 68 | policy_associations = { 69 | admin = { 70 | policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" 71 | access_scope = { 72 | type = "cluster" 73 | } 74 | } 75 | } 76 | } 77 | } 78 | 79 | # Merge the bootstrap behavior with the entries that users provide 80 | merged_access_entries = merge( 81 | { for k, v in local.bootstrap_cluster_creator_admin_permissions : k => v if var.enable_cluster_creator_admin_permissions }, 82 | var.access_entries, 83 | ) 84 | 85 | # Flatten out entries and policy associations so users can specify the policy 86 | # associations within a single entry 87 | flattened_access_entries = flatten([ 88 | for entry_key, entry_val in local.merged_access_entries : [ 89 | for pol_key, pol_val in lookup(entry_val, "policy_associations", {}) : 90 | merge( 91 | { 92 | principal_arn = entry_val.principal_arn 93 | entry_key = entry_key 94 | pol_key = pol_key 95 | }, 96 | { for k, v in { 97 | association_policy_arn = pol_val.policy_arn 98 | association_access_scope_type = 
pol_val.access_scope.type 99 | association_access_scope_namespaces = lookup(pol_val.access_scope, "namespaces", []) 100 | } : k => v if !contains(["EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX", "HYBRID_LINUX"], lookup(entry_val, "type", "STANDARD")) }, 101 | ) 102 | ] 103 | ]) 104 | 105 | # node_groups locals 106 | metadata_options = { 107 | http_endpoint = "enabled" 108 | http_tokens = "required" 109 | http_put_response_hop_limit = 2 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /aws_node_groups.tf: -------------------------------------------------------------------------------- 1 | module "eks_managed_node_group" { 2 | source = "./node_group/aws_managed" 3 | 4 | for_each = { for k, v in var.managed_node_group : k => v if var.enabled } 5 | 6 | enabled = try(each.value.enabled, true) 7 | 8 | cluster_name = try(aws_eks_cluster.default[0].name, data.aws_eks_cluster.eks_cluster.name) 9 | cluster_version = var.kubernetes_version 10 | vpc_security_group_ids = compact( 11 | concat( 12 | aws_security_group.node_group[*].id, 13 | aws_eks_cluster.default[*].vpc_config[0].cluster_security_group_id, 14 | var.nodes_additional_security_group_ids 15 | 16 | ) 17 | ) 18 | # EKS Managed Node Group 19 | name = try(each.value.name, each.key) 20 | environment = try(each.value.name != "" ? 
"" : var.environment, var.environment) 21 | repository = var.repository 22 | subnet_ids = try(each.value.subnet_ids, var.managed_node_group_defaults.subnet_ids, var.subnet_ids) 23 | 24 | min_size = try(each.value.min_size, var.managed_node_group_defaults.min_size, 1) 25 | max_size = try(each.value.max_size, var.managed_node_group_defaults.max_size, 3) 26 | desired_size = try(each.value.desired_size, var.managed_node_group_defaults.desired_size, 1) 27 | 28 | ami_id = try(each.value.ami_id, var.managed_node_group_defaults.ami_id, "") 29 | ami_type = try(each.value.ami_type, var.managed_node_group_defaults.ami_type, null) 30 | ami_release_version = try(each.value.ami_release_version, var.managed_node_group_defaults.ami_release_version, null) 31 | 32 | capacity_type = try(each.value.capacity_type, var.managed_node_group_defaults.capacity_type, null) 33 | disk_size = try(each.value.disk_size, var.managed_node_group_defaults.disk_size, null) 34 | force_update_version = try(each.value.force_update_version, var.managed_node_group_defaults.force_update_version, null) 35 | instance_types = try(each.value.instance_types, var.managed_node_group_defaults.instance_types, null) 36 | labels = try(each.value.labels, var.managed_node_group_defaults.labels, null) 37 | 38 | remote_access = try(each.value.remote_access, var.managed_node_group_defaults.remote_access, {}) 39 | taints = try(each.value.taints, var.managed_node_group_defaults.taints, {}) 40 | update_config = try(each.value.update_config, var.managed_node_group_defaults.update_config, {}) 41 | timeouts = try(each.value.timeouts, var.managed_node_group_defaults.timeouts, {}) 42 | 43 | #------------ASG-Schedule-------------------------------------------------- 44 | create_schedule = try(each.value.create_schedule, var.managed_node_group_defaults.create_schedule, true) 45 | schedules = try(each.value.schedules, var.managed_node_group_defaults.schedules, var.schedules) 46 | 47 | # Launch Template 48 | launch_template_description 
= try(each.value.launch_template_description, var.managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") 49 | launch_template_tags = try(each.value.launch_template_tags, var.managed_node_group_defaults.launch_template_tags, {}) 50 | 51 | ebs_optimized = try(each.value.ebs_optimized, var.managed_node_group_defaults.ebs_optimized, null) 52 | key_name = try(each.value.key_name, var.managed_node_group_defaults.key_name, null) 53 | kms_key_id = try(each.value.kms_key_id, var.managed_node_group_defaults.kms_key_id, null) # fix: fallback previously used the unrelated ebs_optimized default 54 | 55 | launch_template_default_version = try(each.value.launch_template_default_version, var.managed_node_group_defaults.launch_template_default_version, null) 56 | update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.managed_node_group_defaults.update_launch_template_default_version, true) 57 | disable_api_termination = try(each.value.disable_api_termination, var.managed_node_group_defaults.disable_api_termination, null) 58 | kernel_id = try(each.value.kernel_id, var.managed_node_group_defaults.kernel_id, null) 59 | ram_disk_id = try(each.value.ram_disk_id, var.managed_node_group_defaults.ram_disk_id, null) 60 | 61 | block_device_mappings = try(each.value.block_device_mappings, var.managed_node_group_defaults.block_device_mappings, {}) 62 | capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.managed_node_group_defaults.capacity_reservation_specification, null) 63 | cpu_options = try(each.value.cpu_options, var.managed_node_group_defaults.cpu_options, null) 64 | credit_specification = try(each.value.credit_specification, var.managed_node_group_defaults.credit_specification, null) 65 | enclave_options = try(each.value.enclave_options, var.managed_node_group_defaults.enclave_options, null) 66 | license_specifications = try(each.value.license_specifications,
var.managed_node_group_defaults.license_specifications, null) 67 | metadata_options = try(each.value.metadata_options, var.managed_node_group_defaults.metadata_options, local.metadata_options) 68 | enable_monitoring = try(each.value.enable_monitoring, var.managed_node_group_defaults.enable_monitoring, true) 69 | network_interfaces = try(each.value.network_interfaces, var.managed_node_group_defaults.network_interfaces, []) 70 | placement = try(each.value.placement, var.managed_node_group_defaults.placement, null) 71 | 72 | # IAM role 73 | iam_role_arn = try(aws_iam_role.node_groups[0].arn, var.node_role_arn) 74 | 75 | tags = merge(var.tags, try(each.value.tags, var.managed_node_group_defaults.tags, {})) 76 | } 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | 2 | #Module : label 3 | #Description : Terraform module to create consistent naming for multiple names. 4 | 5 | module "labels" { 6 | source = "clouddrove/labels/aws" 7 | version = "1.3.0" 8 | 9 | name = var.name 10 | repository = var.repository 11 | environment = var.environment 12 | managedby = var.managedby 13 | attributes = compact(concat(var.attributes, ["cluster"])) 14 | extra_tags = var.tags 15 | label_order = var.label_order 16 | } 17 | 18 | #Cloudwatch: Logs for Eks cluster 19 | resource "aws_cloudwatch_log_group" "default" { 20 | count = var.enabled && var.external_cluster == false && length(var.enabled_cluster_log_types) > 0 ? 
1 : 0 21 | name = "/aws/eks/${module.labels.id}/cluster" 22 | retention_in_days = var.cluster_log_retention_period 23 | tags = module.labels.tags 24 | kms_key_id = try(aws_kms_key.cloudwatch_log[0].arn, null) # fix: key only exists when cluster_encryption_config_enabled; unconditional [0] index fails otherwise 25 | } 26 | 27 | #tfsec:ignore:aws-eks-no-public-cluster-access ## To provide eks endpoint public access from local network 28 | #tfsec:ignore:aws-eks-no-public-cluster-access-to-cidr ## To provide eks endpoint public access from local network 29 | resource "aws_eks_cluster" "default" { 30 | count = var.enabled && var.external_cluster == false ? 1 : 0 31 | name = module.labels.id 32 | role_arn = aws_iam_role.default[0].arn 33 | version = var.kubernetes_version 34 | enabled_cluster_log_types = var.enabled_cluster_log_types 35 | 36 | access_config { 37 | authentication_mode = var.authentication_mode 38 | bootstrap_cluster_creator_admin_permissions = var.enable_cluster_creator_admin_permissions 39 | } 40 | vpc_config { 41 | subnet_ids = var.subnet_ids 42 | endpoint_private_access = var.endpoint_private_access 43 | endpoint_public_access = var.endpoint_public_access 44 | public_access_cidrs = var.public_access_cidrs 45 | security_group_ids = var.eks_additional_security_group_ids 46 | } 47 | 48 | dynamic "encryption_config" { 49 | for_each = var.cluster_encryption_config_enabled ? [local.cluster_encryption_config] : [] 50 | content { 51 | resources = lookup(encryption_config.value, "resources") 52 | provider { 53 | key_arn = lookup(encryption_config.value, "provider_key_arn") 54 | } 55 | } 56 | } 57 | 58 | timeouts { 59 | create = lookup(var.cluster_timeouts, "create", null) 60 | update = lookup(var.cluster_timeouts, "update", null) 61 | delete = lookup(var.cluster_timeouts, "delete", null) 62 | } 63 | 64 | 65 | dynamic "outpost_config" { 66 | for_each = local.create_outposts_local_cluster ?
[var.outpost_config] : [] 67 | 68 | content { 69 | control_plane_instance_type = outpost_config.value.control_plane_instance_type 70 | outpost_arns = outpost_config.value.outpost_arns 71 | } 72 | } 73 | 74 | tags = merge( 75 | module.labels.tags, 76 | var.eks_tags 77 | ) 78 | 79 | depends_on = [ 80 | aws_iam_role_policy_attachment.amazon_eks_cluster_policy, 81 | aws_iam_role_policy_attachment.amazon_eks_service_policy, 82 | aws_cloudwatch_log_group.default, 83 | ] 84 | 85 | 86 | bootstrap_self_managed_addons = local.auto_mode_enabled ? coalesce(var.bootstrap_self_managed_addons, false) : var.bootstrap_self_managed_addons 87 | 88 | dynamic "compute_config" { 89 | for_each = length(var.cluster_compute_config) > 0 ? [var.cluster_compute_config] : [] 90 | 91 | content { 92 | enabled = local.auto_mode_enabled 93 | node_pools = local.auto_mode_enabled ? try(compute_config.value.node_pools, []) : null 94 | node_role_arn = local.auto_mode_enabled && length(try(compute_config.value.node_pools, [])) > 0 ? aws_iam_role.eks_auto[0].arn : null 95 | } 96 | } 97 | 98 | dynamic "storage_config" { 99 | for_each = local.auto_mode_enabled ? [1] : [] 100 | 101 | content { 102 | block_storage { 103 | enabled = local.auto_mode_enabled 104 | } 105 | } 106 | } 107 | 108 | dynamic "kubernetes_network_config" { 109 | # Not valid on Outposts 110 | for_each = local.create_outposts_local_cluster ? [] : [1] 111 | 112 | content { 113 | dynamic "elastic_load_balancing" { 114 | for_each = local.auto_mode_enabled ? 
[1] : [] 115 | 116 | content { 117 | enabled = local.auto_mode_enabled 118 | } 119 | } 120 | 121 | ip_family = var.cluster_ip_family 122 | service_ipv4_cidr = var.cluster_service_ipv4_cidr 123 | service_ipv6_cidr = var.cluster_service_ipv6_cidr 124 | } 125 | } 126 | lifecycle { 127 | ignore_changes = [ 128 | access_config[0].bootstrap_cluster_creator_admin_permissions 129 | ] 130 | } 131 | } 132 | 133 | data "tls_certificate" "cluster" { 134 | count = var.enabled && var.oidc_provider_enabled && var.external_cluster == false ? 1 : 0 135 | url = aws_eks_cluster.default[0].identity[0].oidc[0].issuer 136 | } 137 | 138 | resource "aws_iam_openid_connect_provider" "default" { 139 | count = var.enabled && var.oidc_provider_enabled && var.external_cluster == false ? 1 : 0 140 | url = try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, local.eks_oidc_provider_arn) 141 | 142 | # url = can(regex("^https://", try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, local.eks_oidc_provider_arn))) ? try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, local.eks_oidc_provider_arn) : "https://${try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, local.eks_oidc_provider_arn)}" 143 | 144 | client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences))) 145 | thumbprint_list = [data.tls_certificate.cluster[0].certificates[0].sha1_fingerprint] 146 | tags = module.labels.tags 147 | } 148 | 149 | resource "aws_eks_addon" "cluster" { 150 | for_each = var.enabled && var.external_cluster == false ? 
{ for addon in var.addons : addon.addon_name => addon } : {} 151 | 152 | cluster_name = aws_eks_cluster.default[0].name 153 | addon_name = each.key 154 | addon_version = lookup(each.value, "addon_version", null) 155 | resolve_conflicts_on_create = lookup(each.value, "resolve_conflicts", null) 156 | resolve_conflicts_on_update = lookup(each.value, "resolve_conflicts", null) 157 | service_account_role_arn = lookup(each.value, "service_account_role_arn", null) 158 | 159 | tags = module.labels.tags 160 | } 161 | -------------------------------------------------------------------------------- /README.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # This is the canonical configuration for the `README.md` 4 | # Run `make readme` to rebuild the `README.md` 5 | # 6 | 7 | # Name of this project 8 | name: Terraform AWS EKS 9 | 10 | # License of this project 11 | license: "MIT" 12 | 13 | # Canonical GitHub repo 14 | github_repo: clouddrove/terraform-aws-eks 15 | 16 | # Badges to display 17 | badges: 18 | - name: "Terraform" 19 | image: "https://img.shields.io/badge/Terraform-v0.13-green" 20 | url: "https://www.terraform.io" 21 | - name: "tfsec" 22 | image: "https://github.com/clouddrove/terraform-aws-eks/actions/workflows/tfsec.yml/badge.svg" 23 | url: "https://github.com/clouddrove/terraform-aws-eks/actions/workflows/tfsec.yml" 24 | - name: "Licence" 25 | image: "https://img.shields.io/badge/License-APACHE-blue.svg" 26 | url: "LICENSE.md" 27 | - name: "Changelog" 28 | image: "https://img.shields.io/badge/Changelog-blue" 29 | url: "CHANGELOG.md" 30 | 31 | # Prerequesties to display 32 | prerequesties: 33 | - name: Terraform 34 | url: https://learn.hashicorp.com/terraform/getting-started/install.html 35 | version: ">= 1.5.4" 36 | 37 | providers: 38 | - name: aws 39 | url: https://aws.amazon.com/ 40 | version: ">= 5.11.0" 41 | 42 | module_dependencies: 43 | - name: Labels Module 44 | url: 
https://github.com/clouddrove/terraform-aws-labels 45 | description: Provides resource tagging. 46 | 47 | # description of this project 48 | description: |- 49 | Terraform module will be created Autoscaling, Workers, EKS, Node Groups. 50 | 51 | # How to use this project 52 | usage : |- 53 | ### Sample example 54 | Here is an example of how you can use this module in your inventory structure: 55 | ```hcl 56 | module "eks" { 57 | source = "clouddrove/eks/aws" 58 | version = "1.0.1" 59 | 60 | name = "eks" 61 | environment = "test" 62 | label_order = ["environment", "name"] 63 | enabled = true 64 | 65 | kubernetes_version = "1.25" 66 | endpoint_private_access = true 67 | endpoint_public_access = true 68 | enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] 69 | oidc_provider_enabled = true 70 | 71 | # Network 72 | vpc_id = module.vpc.vpc_id 73 | subnet_ids = module.subnets.private_subnet_id 74 | allowed_security_groups = [module.ssh.security_group_ids] 75 | allowed_cidr_blocks = ["0.0.0.0/0"] 76 | 77 | # Node Groups Defaults Values It will Work all Node Groups 78 | self_node_group_defaults = { 79 | subnet_ids = module.subnets.private_subnet_id 80 | key_name = module.keypair.name 81 | propagate_tags = [{ 82 | key = "aws-node-termination-handler/managed" 83 | value = true 84 | propagate_at_launch = true 85 | }, 86 | { 87 | key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" 88 | value = "owned" 89 | propagate_at_launch = true 90 | } 91 | ] 92 | 93 | block_device_mappings = { 94 | xvda = { 95 | device_name = "/dev/xvda" 96 | ebs = { 97 | volume_size = 50 98 | volume_type = "gp3" 99 | iops = 3000 100 | throughput = 150 101 | } 102 | } 103 | } 104 | } 105 | 106 | 107 | self_node_groups = { 108 | tools = { 109 | name = "tools" 110 | min_size = 1 111 | max_size = 7 112 | desired_size = 2 113 | bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" 114 | instance_type = "t3a.medium" 115 | } 116 
| 117 | spot = { 118 | name = "spot" 119 | instance_market_options = { 120 | market_type = "spot" 121 | } 122 | min_size = 1 123 | max_size = 7 124 | desired_size = 1 125 | bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" 126 | instance_type = "m5.large" 127 | } 128 | } 129 | 130 | # Schdule self Managed Auto Scaling node group 131 | schedules = { 132 | scale-up = { 133 | min_size = 2 134 | max_size = 2 # Retains current max size 135 | desired_size = 2 136 | start_time = "2023-05-15T19:00:00Z" 137 | end_time = "2023-05-19T19:00:00Z" 138 | timezone = "Europe/Amsterdam" 139 | recurrence = "0 7 * * 1" 140 | }, 141 | scale-down = { 142 | min_size = 0 143 | max_size = 0 # Retains current max size 144 | desired_size = 0 145 | start_time = "2023-05-12T12:00:00Z" 146 | end_time = "2024-03-05T12:00:00Z" 147 | timezone = "Europe/Amsterdam" 148 | recurrence = "0 7 * * 5" 149 | } 150 | } 151 | 152 | # Node Groups Defaults Values It will Work all Node Groups 153 | managed_node_group_defaults = { 154 | subnet_ids = module.subnets.private_subnet_id 155 | key_name = module.keypair.name 156 | nodes_additional_security_group_ids = [module.ssh.security_group_ids] 157 | tags = { 158 | Example = "test" 159 | } 160 | 161 | block_device_mappings = { 162 | xvda = { 163 | device_name = "/dev/xvda" 164 | ebs = { 165 | volume_size = 50 166 | volume_type = "gp3" 167 | iops = 3000 168 | throughput = 150 169 | } 170 | } 171 | } 172 | } 173 | 174 | managed_node_group = { 175 | test = { 176 | min_size = 1 177 | max_size = 7 178 | desired_size = 2 179 | instance_types = ["t3a.medium"] 180 | } 181 | 182 | spot = { 183 | name = "spot" 184 | capacity_type = "SPOT" 185 | 186 | min_size = 1 187 | max_size = 7 188 | desired_size = 1 189 | force_update_version = true 190 | instance_types = ["t3.medium", "t3a.medium"] 191 | } 192 | } 193 | 194 | apply_config_map_aws_auth = true 195 | map_additional_iam_users = [ 196 | { 197 | userarn = 
"arn:aws:iam::xxxxxx:user/nikita@clouddrove.com" 198 | username = "nikita@clouddrove.com" 199 | groups = ["system:masters"] 200 | }, 201 | { 202 | userarn = "arn:aws:iam::xxxxxx:user/sohan@clouddrove.com" 203 | username = "sohan@clouddrove.com" 204 | groups = ["system:masters"] 205 | } 206 | ] 207 | # Schdule EKS Managed Auto Scaling node group 208 | schedules = { 209 | scale-up = { 210 | min_size = 2 211 | max_size = 2 # Retains current max size 212 | desired_size = 2 213 | start_time = "2023-05-15T19:00:00Z" 214 | end_time = "2023-05-19T19:00:00Z" 215 | timezone = "Europe/Amsterdam" 216 | recurrence = "0 7 * * 1" 217 | }, 218 | scale-down = { 219 | min_size = 0 220 | max_size = 0 # Retains current max size 221 | desired_size = 0 222 | start_time = "2023-05-12T12:00:00Z" 223 | end_time = "2024-03-05T12:00:00Z" 224 | timezone = "Europe/Amsterdam" 225 | recurrence = "0 7 * * 5" 226 | } 227 | } 228 | } 229 | ``` 230 | -------------------------------------------------------------------------------- /self_node_groups.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Self Managed Node Group 3 | ################################################################################ 4 | 5 | module "self_managed_node_group" { 6 | source = "./node_group/self_managed" 7 | 8 | for_each = { for k, v in var.self_node_groups : k => v if var.enabled } 9 | 10 | enabled = try(each.value.enabled, true) 11 | 12 | cluster_name = aws_eks_cluster.default[0].name 13 | security_group_ids = compact( 14 | concat( 15 | aws_security_group.node_group[*].id, 16 | aws_eks_cluster.default[*].vpc_config[0].cluster_security_group_id 17 | ) 18 | ) 19 | 20 | iam_instance_profile_arn = aws_iam_instance_profile.default[0].arn 21 | 22 | # Autoscaling Group 23 | name = try(each.value.name, each.key) 24 | environment = var.environment 25 | repository = var.repository 26 | 27 | 28 | 
availability_zones = try(each.value.availability_zones, var.self_node_group_defaults.availability_zones, null) 29 | subnet_ids = try(each.value.subnet_ids, var.self_node_group_defaults.subnet_ids, var.subnet_ids) 30 | key_name = try(each.value.key_name, var.self_node_group_defaults.key_name, null) 31 | 32 | min_size = try(each.value.min_size, var.self_node_group_defaults.min_size, 0) 33 | max_size = try(each.value.max_size, var.self_node_group_defaults.max_size, 3) 34 | desired_size = try(each.value.desired_size, var.self_node_group_defaults.desired_size, 1) 35 | capacity_rebalance = try(each.value.capacity_rebalance, var.self_node_group_defaults.capacity_rebalance, null) 36 | min_elb_capacity = try(each.value.min_elb_capacity, var.self_node_group_defaults.min_elb_capacity, null) 37 | wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, var.self_node_group_defaults.wait_for_elb_capacity, null) 38 | wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_node_group_defaults.wait_for_capacity_timeout, null) 39 | default_cooldown = try(each.value.default_cooldown, var.self_node_group_defaults.default_cooldown, null) 40 | protect_from_scale_in = try(each.value.protect_from_scale_in, var.self_node_group_defaults.protect_from_scale_in, null) 41 | 42 | target_group_arns = try(each.value.target_group_arns, var.self_node_group_defaults.target_group_arns, null) 43 | placement_group = try(each.value.placement_group, var.self_node_group_defaults.placement_group, null) 44 | health_check_type = try(each.value.health_check_type, var.self_node_group_defaults.health_check_type, null) 45 | health_check_grace_period = try(each.value.health_check_grace_period, var.self_node_group_defaults.health_check_grace_period, null) 46 | 47 | force_delete = try(each.value.force_delete, var.self_node_group_defaults.force_delete, null) 48 | termination_policies = try(each.value.termination_policies, var.self_node_group_defaults.termination_policies, null) 49 | 
suspended_processes = try(each.value.suspended_processes, var.self_node_group_defaults.suspended_processes, null) 50 | max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_node_group_defaults.max_instance_lifetime, null) 51 | 52 | enabled_metrics = try(each.value.enabled_metrics, var.self_node_group_defaults.enabled_metrics, null) 53 | metrics_granularity = try(each.value.metrics_granularity, var.self_node_group_defaults.metrics_granularity, null) 54 | service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_node_group_defaults.service_linked_role_arn, null) 55 | 56 | initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_node_group_defaults.initial_lifecycle_hooks, []) 57 | instance_refresh = try(each.value.instance_refresh, var.self_node_group_defaults.instance_refresh, null) 58 | use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_node_group_defaults.use_mixed_instances_policy, false) 59 | mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_node_group_defaults.mixed_instances_policy, null) 60 | warm_pool = try(each.value.warm_pool, var.self_node_group_defaults.warm_pool, null) 61 | 62 | #------------ASG-Schedule-------------------------------------------------- 63 | create_schedule = try(each.value.create_schedule, var.self_node_group_defaults.create_schedule, false) 64 | schedules = try(each.value.schedules, var.self_node_group_defaults.schedules, var.schedules) 65 | 66 | delete_timeout = try(each.value.delete_timeout, var.self_node_group_defaults.delete_timeout, null) 67 | 68 | # User data 69 | cluster_endpoint = try(aws_eks_cluster.default[0].endpoint, "") 70 | cluster_auth_base64 = try(aws_eks_cluster.default[0].certificate_authority[0].data, "") 71 | pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_node_group_defaults.pre_bootstrap_user_data, "") 72 | post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, 
var.self_node_group_defaults.post_bootstrap_user_data, "") 73 | bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_node_group_defaults.bootstrap_extra_args, "") 74 | 75 | # Launch Template 76 | 77 | 78 | ebs_optimized = try(each.value.ebs_optimized, var.self_node_group_defaults.ebs_optimized, true) 79 | kubernetes_version = try(each.value.kubernetes_version, var.self_node_group_defaults.cluster_version, var.kubernetes_version) 80 | instance_type = try(each.value.instance_type, var.self_node_group_defaults.instance_type, "m6i.large") 81 | kms_key_id = try(each.value.kms_key_id, var.self_node_group_defaults.kms_key_id, null) 82 | 83 | disable_api_termination = try(each.value.disable_api_termination, var.self_node_group_defaults.disable_api_termination, null) 84 | instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_node_group_defaults.instance_initiated_shutdown_behavior, null) 85 | kernel_id = try(each.value.kernel_id, var.self_node_group_defaults.kernel_id, null) 86 | ram_disk_id = try(each.value.ram_disk_id, var.self_node_group_defaults.ram_disk_id, null) 87 | 88 | block_device_mappings = try(each.value.block_device_mappings, var.self_node_group_defaults.block_device_mappings, []) 89 | capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_node_group_defaults.capacity_reservation_specification, null) 90 | cpu_options = try(each.value.cpu_options, var.self_node_group_defaults.cpu_options, null) 91 | credit_specification = try(each.value.credit_specification, var.self_node_group_defaults.credit_specification, null) 92 | enclave_options = try(each.value.enclave_options, var.self_node_group_defaults.enclave_options, null) 93 | hibernation_options = try(each.value.hibernation_options, var.self_node_group_defaults.hibernation_options, null) 94 | instance_market_options = try(each.value.instance_market_options,
var.self_node_group_defaults.instance_market_options, null) 95 | license_specifications = try(each.value.license_specifications, var.self_node_group_defaults.license_specifications, null) 96 | metadata_options = try(each.value.metadata_options, var.self_node_group_defaults.metadata_options, local.metadata_options) 97 | enable_monitoring = try(each.value.enable_monitoring, var.self_node_group_defaults.enable_monitoring, false) 98 | # network_interfaces = try(each.value.network_interfaces, var.self_node_group_defaults.network_interfaces, []) 99 | placement = try(each.value.placement, var.self_node_group_defaults.placement, null) 100 | 101 | tags = merge(var.tags, try(each.value.tags, var.self_node_group_defaults.tags, {})) 102 | propagate_tags = try(each.value.propagate_tags, var.self_node_group_defaults.propagate_tags, []) 103 | 104 | } 105 | 106 | 107 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [![Banner](https://github.com/clouddrove/terraform-module-template/assets/119565952/67a8a1af-2eb7-40b7-ae07-c94cde9ce062)][website] 3 |

4 | Terraform AWS EKS 5 |

6 | 7 |

8 | With our comprehensive DevOps toolkit - streamline operations, automate workflows, enhance collaboration and, most importantly, deploy with confidence. 9 |

10 | 11 | 12 |

13 | 14 | 15 | Terraform 16 | 17 | 18 | tfsec 19 | 20 | 21 | Licence 22 | 23 | 24 | Changelog 25 | 26 | 27 | 28 |

29 |

30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |

45 |
46 | 47 | 48 | We are a group of DevOps engineers & architects, joining hands in this ever evolving digital landscape. With our strong belief in Automation; just like microservices, always on the lookout to split the the infrastructure into smaller connected resources (database, cluster and more) which could be standardized, are manageable, scalable, secure & follow industry best practices. 49 | 50 | 51 | This module includes Terraform open source, examples, and automation tests (for better understanding), which would help you create and improve your infrastructure with minimalistic coding. 52 | 53 | 54 | 55 | 56 | ## Prerequisites and Providers 57 | 58 | This table contains both Prerequisites and Providers: 59 | 60 | | Description | Name | Version | 61 | |:-------------:|:-------------------------------------------:|:---------:| 62 | | **Prerequisite** | [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) | >= 1.5.4 | 63 | | **Provider** | [aws](https://aws.amazon.com/) | >= 5.11.0 | 64 | 65 | 66 | 67 | 68 | 69 | ## Examples 70 | 71 | **IMPORTANT:** Since the master branch used in source varies based on new modifications, we recommend using the [release versions](https://github.com/clouddrove/terraform-aws-eks/releases). 72 | 73 | 📌 For additional usage examples, check the complete list under [`examples/`](./examples) directory. 74 | 75 | 76 | 77 | ## Inputs and Outputs 78 | 79 | Refer to complete documentation: [here](docs/io.md) 80 | 81 | 82 | 93 | 94 | 95 | ## Module Dependencies 96 | 97 | This module has dependencies on: 98 | - [Labels Module](https://github.com/clouddrove/terraform-aws-labels): Provides resource tagging. 99 | 100 | 101 | ## 📑 Changelog 102 | 103 | Refer [here](CHANGELOG.md). 104 | 105 | 106 | 107 | 108 | ## ✨ Contributors 109 | 110 | Big thanks to our contributors for elevating our project with their dedication and expertise! 
But, we do not wish to stop there; we would like to invite contributions from the community in improving these projects and making them more versatile for better reach. Remember, every bit of contribution is immensely valuable, as, together, we are moving in only one direction, i.e. forward. 111 | 112 | 113 | 114 | 115 |
116 |
117 | 118 | If you're considering contributing to our project, here are a few quick guidelines that we have been following (Got a suggestion? We are all ears!): 119 | 120 | - **Fork the Repository:** Create a new branch for your feature or bug fix. 121 | - **Coding Standards:** You know the drill. 122 | - **Clear Commit Messages:** Write clear and concise commit messages to facilitate understanding. 123 | - **Thorough Testing:** Test your changes thoroughly before submitting a pull request. 124 | - **Documentation Updates:** Include relevant documentation updates if your changes impact it. 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | ## Feedback 139 | Spot a bug or have thoughts to share with us? Let's squash it together! Log it in our [issue tracker](https://github.com/clouddrove/terraform-aws-eks/issues), feel free to drop us an email at [hello@clouddrove.com](mailto:hello@clouddrove.com). 140 | 141 | Show some love with a ★ on [our GitHub](https://github.com/clouddrove/terraform-aws-eks)! if our work has brightened your day! – your feedback fuels our journey! 142 | 143 | 144 | ## :rocket: Our Accomplishment 145 | 146 | We have [*100+ Terraform modules*][terraform_modules] 🙌. You could consider them finished, but, with enthusiasts like yourself, we are able to ever improve them, so we call our status - improvement in progress. 147 | 148 | - [Terraform Module Registry:](https://registry.terraform.io/namespaces/clouddrove) Discover our Terraform modules here. 149 | 150 | - [Terraform Modules for AWS/Azure Modules:](https://github.com/clouddrove/toc) Explore our comprehensive Table of Contents for easy navigation through our documentation for modules pertaining to AWS, Azure & GCP. 151 | 152 | - [Terraform Modules for Digital Ocean:](https://github.com/terraform-do-modules/toc) Check out our specialized Terraform modules for Digital Ocean. 
153 | 154 | 155 | 156 | 157 | ## Join Our Slack Community 158 | 159 | Join our vibrant open-source slack community and embark on an ever-evolving journey with CloudDrove; helping you in moving upwards in your career path. 160 | Join our vibrant Open Source Slack Community and embark on a learning journey with CloudDrove. Grow with us in the world of DevOps and set your career on a path of consistency. 161 | 162 | 🌐💬What you'll get after joining this Slack community: 163 | 164 | - 🚀 Encouragement to upgrade your best version. 165 | - 🌈 Learning companionship with our DevOps squad. 166 | - 🌱 Relentless growth with daily updates on new advancements in technologies. 167 | 168 | Join our tech elites [Join Now][slack] 🚀 169 | 170 | 171 | ## Explore Our Blogs 172 | 173 | Click [here][blog] :books: :star2: 174 | 175 | ## Tap into our capabilities 176 | We provide a platform for organizations to engage with experienced top-tier DevOps & Cloud services. Tap into our pool of certified engineers and architects to elevate your DevOps and Cloud Solutions. 177 | 178 | At [CloudDrove][website], has extensive experience in designing, building & migrating environments, securing, consulting, monitoring, optimizing, automating, and maintaining complex and large modern systems. With remarkable client footprints in American & European corridors, our certified architects & engineers are ready to serve you as per your requirements & schedule. Write to us at [business@clouddrove.com](mailto:business@clouddrove.com). 179 | 180 |

We are The Cloud Experts!

181 |
182 |

We ❤️ Open Source and you can check out our other modules to get help with your new Cloud ideas.

183 | 184 | [website]: https://clouddrove.com 185 | [blog]: https://blog.clouddrove.com 186 | [slack]: https://www.launchpass.com/devops-talks 187 | [github]: https://github.com/clouddrove 188 | [linkedin]: https://cpco.io/linkedin 189 | [twitter]: https://twitter.com/clouddrove/ 190 | [email]: https://clouddrove.com/contact-us.html 191 | [terraform_modules]: https://github.com/clouddrove?utf8=%E2%9C%93&q=terraform-&type=&language= 192 | -------------------------------------------------------------------------------- /examples/eks-auto-mode/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | 5 | data "aws_availability_zones" "available" { 6 | # Exclude local zones 7 | filter { 8 | name = "opt-in-status" 9 | values = ["opt-in-not-required"] 10 | } 11 | } 12 | 13 | locals { 14 | name = "clouddrove-eks" 15 | cluster_version = "1.32" 16 | region = "eu-west-1" 17 | 18 | vpc_cidr = "10.0.0.0/16" 19 | environment = "test" 20 | label_order = ["name", "environment"] 21 | azs = slice(data.aws_availability_zones.available.names, 0, 3) 22 | 23 | tags = { 24 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 25 | } 26 | } 27 | 28 | # ################################################################################ 29 | # Security Groups module call 30 | ################################################################################ 31 | 32 | module "ssh" { 33 | source = "clouddrove/security-group/aws" 34 | version = "2.0.0" 35 | 36 | name = "${local.name}-ssh" 37 | environment = local.environment 38 | label_order = local.label_order 39 | vpc_id = module.vpc.vpc_id 40 | new_sg_ingress_rules_with_cidr_blocks = [{ 41 | rule_count = 1 42 | from_port = 22 43 | protocol = "tcp" 44 | to_port = 22 45 | cidr_blocks = [local.vpc_cidr] 46 | description = "Allow ssh traffic." 
47 | } 48 | ] 49 | 50 | ## EGRESS Rules 51 | new_sg_egress_rules_with_cidr_blocks = [{ 52 | rule_count = 1 53 | from_port = 22 54 | protocol = "tcp" 55 | to_port = 22 56 | cidr_blocks = [local.vpc_cidr] 57 | description = "Allow ssh outbound traffic." 58 | }] 59 | } 60 | 61 | #tfsec:ignore:aws-ec2-no-public-ingress-acl ## reason: Public subnets need internet access for EKS load balancer 62 | #tfsec:ignore:aws-ec2-no-excessive-port-access ## reason: Required for EKS public access 63 | module "http_https" { 64 | source = "clouddrove/security-group/aws" 65 | version = "2.0.0" 66 | 67 | name = "${local.name}-http-https" 68 | environment = local.environment 69 | label_order = local.label_order 70 | vpc_id = module.vpc.vpc_id 71 | ## INGRESS Rules 72 | new_sg_ingress_rules_with_cidr_blocks = [{ 73 | rule_count = 1 74 | from_port = 22 75 | protocol = "tcp" 76 | to_port = 22 77 | cidr_blocks = [local.vpc_cidr] 78 | description = "Allow ssh traffic." 79 | }, 80 | { 81 | rule_count = 2 82 | from_port = 80 83 | protocol = "tcp" 84 | to_port = 80 85 | cidr_blocks = [local.vpc_cidr] 86 | description = "Allow http traffic." 87 | }, 88 | { 89 | rule_count = 3 90 | from_port = 443 91 | protocol = "tcp" 92 | to_port = 443 93 | cidr_blocks = [local.vpc_cidr] 94 | description = "Allow https traffic." 95 | } 96 | ] 97 | 98 | ## EGRESS Rules 99 | new_sg_egress_rules_with_cidr_blocks = [{ 100 | rule_count = 1 101 | from_port = 0 102 | protocol = "-1" 103 | to_port = 0 104 | cidr_blocks = [local.vpc_cidr] 105 | ipv6_cidr_blocks = ["::/0"] 106 | description = "Allow all traffic." 107 | } 108 | ] 109 | } 110 | 111 | ################################################################################ 112 | # EKS Module 113 | ################################################################################ 114 | 115 | module "eks" { 116 | source = "../.." 
117 | 118 | name = "automode-auth" 119 | environment = local.environment 120 | label_order = local.label_order 121 | 122 | cluster_compute_config = { 123 | enabled = true 124 | node_pools = ["general-purpose"] 125 | } 126 | create = true 127 | enable_cluster_creator_admin_permissions = true 128 | authentication_mode = "API_AND_CONFIG_MAP" 129 | 130 | vpc_id = module.vpc.vpc_id 131 | subnet_ids = module.subnets.private_subnet_id 132 | allowed_security_groups = [module.ssh.security_group_id] 133 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 134 | 135 | apply_config_map_aws_auth = false 136 | 137 | 138 | 139 | ######## Access entry for eks cluster with Admin access ########## 140 | access_entries = { 141 | "admin-role-access" = { 142 | principal_arn = "arn:aws:iam::924144197303:role/automated-eks-cluster-assume-role" 143 | kubernetes_groups = [] 144 | type = "STANDARD" 145 | policy_associations = { 146 | "full-access" = { 147 | policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" 148 | access_scope = { 149 | type = "cluster" 150 | namespaces = [] 151 | } 152 | } 153 | } 154 | }, 155 | ####### Readonly access ######## 156 | "read-only-access" = { 157 | principal_arn = "arn:aws:iam::924144197303:role/automated-eks-cluster-assume-role" 158 | kubernetes_groups = [] 159 | type = "STANDARD" 160 | policy_associations = { 161 | "view-access" = { 162 | policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" 163 | access_scope = { 164 | type = "cluster" 165 | namespaces = [] 166 | } 167 | } 168 | } 169 | } 170 | } 171 | 172 | tags = local.tags 173 | } 174 | 175 | ################################################################################ 176 | # Supporting Resources 177 | ################################################################################ 178 | 179 | module "vpc" { 180 | source = "clouddrove/vpc/aws" 181 | version = "2.0.0" 182 | 183 | name = 
local.name 184 | environment = local.environment 185 | label_order = local.label_order 186 | cidr_block = local.vpc_cidr 187 | 188 | } 189 | 190 | 191 | ################################################################################ 192 | # Subnet Module 193 | ################################################################################ 194 | #tfsec:ignore:aws-ec2-no-public-ingress-acl ## reason: Public subnets need internet access for EKS load balancer 195 | #tfsec:ignore:aws-ec2-no-excessive-port-access ## reason: Required for EKS public access 196 | module "subnets" { 197 | source = "clouddrove/subnet/aws" 198 | version = "2.0.0" 199 | 200 | name = "${local.name}-subnets" 201 | environment = local.environment 202 | label_order = local.label_order 203 | 204 | nat_gateway_enabled = true 205 | availability_zones = ["${local.region}a", "${local.region}b"] 206 | vpc_id = module.vpc.vpc_id 207 | cidr_block = module.vpc.vpc_cidr_block 208 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 209 | type = "public-private" 210 | igw_id = module.vpc.igw_id 211 | 212 | extra_public_tags = { 213 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 214 | "kubernetes.io/role/elb" = "1" 215 | } 216 | 217 | extra_private_tags = { 218 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 219 | "kubernetes.io/role/internal-elb" = "1" 220 | } 221 | 222 | public_inbound_acl_rules = [ 223 | { 224 | rule_number = 100 225 | rule_action = "allow" 226 | from_port = 0 227 | to_port = 0 228 | protocol = "-1" 229 | cidr_block = "0.0.0.0/0" 230 | }, 231 | { 232 | rule_number = 101 233 | rule_action = "allow" 234 | from_port = 0 235 | to_port = 0 236 | protocol = "-1" 237 | ipv6_cidr_block = "::/0" 238 | }, 239 | ] 240 | 241 | public_outbound_acl_rules = [ 242 | { 243 | rule_number = 100 244 | rule_action = "allow" 245 | from_port = 0 246 | to_port = 0 247 | protocol = "-1" 248 | cidr_block = "0.0.0.0/0" 249 | }, 250 | { 251 | rule_number = 101 252 | rule_action = "allow" 
253 | from_port = 0 254 | to_port = 0 255 | protocol = "-1" 256 | ipv6_cidr_block = "::/0" 257 | }, 258 | ] 259 | 260 | #tfsec:ignore:aws-ec2-no-excessive-port-access ## reason: Required for EKS public access 261 | private_inbound_acl_rules = [ 262 | { 263 | rule_number = 100 264 | rule_action = "allow" 265 | from_port = 0 266 | to_port = 0 267 | protocol = "-1" 268 | cidr_block = "0.0.0.0/0" 269 | }, 270 | { 271 | rule_number = 101 272 | rule_action = "allow" 273 | from_port = 0 274 | to_port = 0 275 | protocol = "-1" 276 | ipv6_cidr_block = "::/0" 277 | }, 278 | ] 279 | 280 | #tfsec:ignore:aws-ec2-no-excessive-port-access ## reason: Required for EKS public access 281 | private_outbound_acl_rules = [ 282 | { 283 | rule_number = 100 284 | rule_action = "allow" 285 | from_port = 0 286 | to_port = 0 287 | protocol = "-1" 288 | cidr_block = "0.0.0.0/0" 289 | }, 290 | { 291 | rule_number = 101 292 | rule_action = "allow" 293 | from_port = 0 294 | to_port = 0 295 | protocol = "-1" 296 | ipv6_cidr_block = "::/0" 297 | }, 298 | ] 299 | } -------------------------------------------------------------------------------- /node_group/aws_managed/variables.tf: -------------------------------------------------------------------------------- 1 | #Module : LABEL 2 | #Description : Terraform label module variables. 3 | variable "name" { 4 | type = string 5 | default = "" 6 | description = "Name (e.g. `app` or `cluster`)." 7 | } 8 | 9 | variable "repository" { 10 | type = string 11 | default = "https://github.com/clouddrove/terraform-aws-eks" 12 | description = "Terraform current module repo" 13 | } 14 | 15 | variable "environment" { 16 | type = string 17 | default = "" 18 | description = "Environment (e.g. `prod`, `dev`, `staging`)." 19 | } 20 | 21 | variable "label_order" { 22 | type = list(any) 23 | default = [] 24 | description = "Label order, e.g. `name`,`application`." 
25 | } 26 | 27 | variable "managedby" { 28 | type = string 29 | default = "hello@clouddrove.com" 30 | description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'." 31 | } 32 | 33 | variable "attributes" { 34 | type = list(any) 35 | default = [] 36 | description = "Additional attributes (e.g. `1`)." 37 | } 38 | 39 | variable "tags" { 40 | type = map(any) 41 | default = {} 42 | description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 43 | } 44 | 45 | 46 | variable "enabled" { 47 | type = bool 48 | default = true 49 | description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." 50 | } 51 | 52 | variable "cluster_name" { 53 | description = "Name of associated EKS cluster" 54 | type = string 55 | default = null 56 | } 57 | 58 | #-----------------------------------------------------------Launch_Template--------------------------------------------------------- 59 | 60 | variable "launch_template_description" { 61 | description = "Description of the launch template" 62 | type = string 63 | default = null 64 | } 65 | 66 | variable "ebs_optimized" { 67 | description = "If true, the launched EC2 instance(s) will be EBS-optimized" 68 | type = bool 69 | default = null 70 | } 71 | 72 | variable "ami_id" { 73 | description = "The AMI from which to launch the instance. 
If not supplied, EKS will use its own default image" 74 | type = string 75 | default = "" 76 | } 77 | 78 | variable "key_name" { 79 | description = "The key name that should be used for the instance(s)" 80 | type = string 81 | default = null 82 | } 83 | 84 | variable "vpc_security_group_ids" { 85 | description = "A list of security group IDs to associate" 86 | type = list(string) 87 | default = [] 88 | } 89 | 90 | variable "launch_template_default_version" { 91 | description = "Default version of the launch template" 92 | type = string 93 | default = null 94 | } 95 | 96 | variable "update_launch_template_default_version" { 97 | description = "Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version`" 98 | type = bool 99 | default = true 100 | } 101 | 102 | variable "disable_api_termination" { 103 | description = "If true, enables EC2 instance termination protection" 104 | type = bool 105 | default = null 106 | } 107 | 108 | variable "kernel_id" { 109 | description = "The kernel ID" 110 | type = string 111 | default = null 112 | } 113 | 114 | variable "ram_disk_id" { 115 | description = "The ID of the ram disk" 116 | type = string 117 | default = null 118 | } 119 | 120 | variable "block_device_mappings" { 121 | description = "Specify volumes to attach to the instance besides the volumes specified by the AMI" 122 | type = any 123 | default = {} 124 | } 125 | 126 | variable "capacity_reservation_specification" { 127 | description = "Targeting for EC2 capacity reservations" 128 | type = any 129 | default = null 130 | } 131 | 132 | variable "cpu_options" { 133 | description = "The CPU options for the instance" 134 | type = map(string) 135 | default = null 136 | } 137 | 138 | variable "credit_specification" { 139 | description = "Customize the credit specification of the instance" 140 | type = map(string) 141 | default = null 142 | } 143 | 144 | variable "enclave_options" { 145 | description = "Enable Nitro 
Enclaves on launched instances" 146 | type = map(string) 147 | default = null 148 | } 149 | 150 | variable "instance_market_options" { 151 | description = "The market (purchasing) option for the instance" 152 | type = any 153 | default = null 154 | } 155 | 156 | variable "license_specifications" { 157 | description = "A list of license specifications to associate with" 158 | type = map(string) 159 | default = null 160 | } 161 | 162 | variable "metadata_options" { 163 | description = "Customize the metadata options for the instance" 164 | type = map(string) 165 | default = { 166 | http_endpoint = "enabled" 167 | http_tokens = "required" 168 | http_put_response_hop_limit = 2 169 | } 170 | } 171 | 172 | variable "kms_key_id" { 173 | type = string 174 | default = null 175 | description = "The KMS ID of EBS volume" 176 | } 177 | 178 | 179 | variable "enable_monitoring" { 180 | description = "Enables/disables detailed monitoring" 181 | type = bool 182 | default = false 183 | } 184 | 185 | variable "network_interfaces" { 186 | description = "Customize network interfaces to be attached at instance boot time" 187 | type = list(any) 188 | default = [] 189 | } 190 | 191 | variable "placement" { 192 | description = "The placement of the instance" 193 | type = map(string) 194 | default = null 195 | } 196 | 197 | variable "launch_template_tags" { 198 | description = "A map of additional tags to add to the tag_specifications of launch template created" 199 | type = map(string) 200 | default = {} 201 | } 202 | 203 | #EKS_Managed_Node_Group 204 | 205 | variable "subnet_ids" { 206 | description = "Identifiers of EC2 Subnets to associate with the EKS Node Group. 
These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME`" 207 | type = list(string) 208 | default = null 209 | } 210 | 211 | variable "min_size" { 212 | description = "Minimum number of instances/nodes" 213 | type = number 214 | default = 0 215 | } 216 | 217 | variable "max_size" { 218 | description = "Maximum number of instances/nodes" 219 | type = number 220 | default = 3 221 | } 222 | 223 | variable "desired_size" { 224 | description = "Desired number of instances/nodes" 225 | type = number 226 | default = 1 227 | } 228 | 229 | variable "ami_type" { 230 | description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64`" 231 | type = string 232 | default = null 233 | } 234 | 235 | variable "ami_release_version" { 236 | description = "AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version" 237 | type = string 238 | default = null 239 | } 240 | 241 | variable "capacity_type" { 242 | description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`" 243 | type = string 244 | default = "ON_DEMAND" 245 | } 246 | 247 | variable "disk_size" { 248 | description = "Disk size in GiB for nodes. Defaults to `20`" 249 | type = number 250 | default = null 251 | } 252 | 253 | variable "force_update_version" { 254 | description = "Force version update if existing pods are unable to be drained due to a pod disruption budget issue" 255 | type = bool 256 | default = null 257 | } 258 | 259 | variable "iam_role_arn" { 260 | type = string 261 | default = "" 262 | description = "" 263 | } 264 | 265 | 266 | variable "instance_types" { 267 | description = "Set of instance types associated with the EKS Node Group. 
Defaults to `[\"t3.medium\"]`" 268 | type = list(string) 269 | default = null 270 | } 271 | 272 | variable "labels" { 273 | description = "Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed" 274 | type = map(string) 275 | default = null 276 | } 277 | 278 | variable "cluster_version" { 279 | description = "Kubernetes version. Defaults to EKS Cluster Kubernetes version" 280 | type = string 281 | default = null 282 | } 283 | 284 | variable "remote_access" { 285 | description = "Configuration block with remote access settings" 286 | type = any 287 | default = {} 288 | } 289 | 290 | variable "taints" { 291 | description = "The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group" 292 | type = any 293 | default = {} 294 | } 295 | 296 | variable "update_config" { 297 | description = "Configuration block of settings for max unavailable resources during node group updates" 298 | type = map(string) 299 | default = {} 300 | } 301 | 302 | variable "timeouts" { 303 | description = "Create, update, and delete timeout configurations for the node group" 304 | type = map(string) 305 | default = {} 306 | } 307 | 308 | variable "before_cluster_joining_userdata" { 309 | type = string 310 | default = "" 311 | description = "Additional commands to execute on each worker node before joining the EKS cluster (before executing the `bootstrap.sh` script). 
For more info, see https://kubedex.com/90-days-of-aws-eks-in-test" 312 | } 313 | 314 | #-----------------------------------------------ASG-Schedule---------------------------------------------------------------- 315 | 316 | variable "create_schedule" { 317 | description = "Determines whether to create autoscaling group schedule or not" 318 | type = bool 319 | default = true 320 | } 321 | 322 | variable "schedules" { 323 | description = "Map of autoscaling group schedule to create" 324 | type = map(any) 325 | default = {} 326 | } 327 | -------------------------------------------------------------------------------- /iam.tf: -------------------------------------------------------------------------------- 1 | 2 | data "aws_iam_policy_document" "assume_role" { 3 | count = var.enabled && var.external_cluster == false ? 1 : 0 4 | 5 | statement { 6 | effect = "Allow" 7 | actions = [ 8 | "sts:AssumeRole", 9 | "sts:TagSession", 10 | ] 11 | 12 | principals { 13 | type = "Service" 14 | identifiers = ["eks.amazonaws.com"] 15 | } 16 | } 17 | } 18 | 19 | resource "aws_iam_role" "default" { 20 | count = var.enabled && var.external_cluster == false ? 1 : 0 21 | 22 | name = module.labels.id 23 | assume_role_policy = data.aws_iam_policy_document.assume_role[0].json 24 | permissions_boundary = var.permissions_boundary 25 | 26 | tags = module.labels.tags 27 | } 28 | 29 | resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" { 30 | count = var.enabled && var.external_cluster == false ? 1 : 0 31 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", data.aws_partition.current.partition) 32 | role = aws_iam_role.default[0].name 33 | } 34 | 35 | resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" { 36 | count = var.enabled && var.external_cluster == false ? 
1 : 0 37 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", data.aws_partition.current.partition) 38 | role = aws_iam_role.default[0].name 39 | } 40 | 41 | resource "aws_iam_role_policy_attachment" "amazon_eks_block_storage_policy" { 42 | count = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 1 : 0 43 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSBlockStoragePolicy", data.aws_partition.current.partition) 44 | role = aws_iam_role.default[0].name 45 | } 46 | 47 | resource "aws_iam_role_policy_attachment" "amazon_eks_compute_policy" { 48 | count = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 1 : 0 49 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSComputePolicy", data.aws_partition.current.partition) 50 | role = aws_iam_role.default[0].name 51 | } 52 | 53 | resource "aws_iam_role_policy_attachment" "amazon_eks_load_balancing_policy" { 54 | count = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 1 : 0 55 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSLoadBalancingPolicy", data.aws_partition.current.partition) 56 | role = aws_iam_role.default[0].name 57 | } 58 | 59 | resource "aws_iam_role_policy_attachment" "amazon_eks_networking_policy" { 60 | count = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 1 : 0 61 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSNetworkingPolicy", data.aws_partition.current.partition) 62 | role = aws_iam_role.default[0].name 63 | } 64 | 65 | data "aws_iam_policy_document" "service_role" { 66 | count = var.enabled && var.external_cluster == false ? 
1 : 0 67 | 68 | statement { 69 | effect = "Allow" 70 | actions = [ 71 | "ec2:DescribeInternetGateways", 72 | "elasticloadbalancing:SetIpAddressType", 73 | "elasticloadbalancing:SetSubnets", 74 | "ec2:DescribeAccountAttributes", 75 | "ec2:DescribeAddresses", 76 | ] 77 | resources = ["*"] 78 | } 79 | } 80 | 81 | resource "aws_iam_role_policy" "service_role" { 82 | count = var.enabled && var.external_cluster == false ? 1 : 0 83 | role = aws_iam_role.default[0].name 84 | policy = data.aws_iam_policy_document.service_role[0].json 85 | 86 | name = module.labels.id 87 | 88 | } 89 | 90 | 91 | #-------------------------------------------------------IAM FOR node Group---------------------------------------------- 92 | 93 | #Module : IAM ROLE 94 | #Description : Provides an IAM role. 95 | resource "aws_iam_role" "node_groups" { 96 | count = var.enabled && var.external_cluster == false ? 1 : 0 97 | name = "${module.labels.id}-node_group" 98 | assume_role_policy = data.aws_iam_policy_document.node_group[0].json 99 | tags = module.labels.tags 100 | } 101 | 102 | #Module : IAM ROLE POLICY ATTACHMENT CNI 103 | #Description : Attaches a Managed IAM Policy to an IAM role. 104 | resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" { 105 | count = var.enabled && var.external_cluster == false ? 1 : 0 106 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" 107 | role = aws_iam_role.node_groups[0].name 108 | } 109 | 110 | resource "aws_iam_role_policy_attachment" "additional" { 111 | for_each = { for k, v in var.iam_role_additional_policies : k => v if var.enabled } 112 | 113 | policy_arn = each.value 114 | role = aws_iam_role.node_groups[0].name 115 | } 116 | 117 | #Module : IAM ROLE POLICY ATTACHMENT EC2 CONTAINER REGISTRY READ ONLY 118 | #Description : Attaches a Managed IAM Policy to an IAM role. 119 | resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" { 120 | count = var.enabled && var.external_cluster == false ? 
1 : 0 121 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" 122 | role = aws_iam_role.node_groups[0].name 123 | } 124 | 125 | resource "aws_iam_policy" "amazon_eks_node_group_autoscaler_policy" { 126 | count = var.enabled && var.external_cluster == false ? 1 : 0 127 | name = format("%s-node-group-policy", module.labels.id) 128 | policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy[0].json 129 | } 130 | 131 | resource "aws_iam_role_policy_attachment" "amazon_eks_node_group_autoscaler_policy" { 132 | count = var.enabled && var.external_cluster == false ? 1 : 0 133 | policy_arn = aws_iam_policy.amazon_eks_node_group_autoscaler_policy[0].arn 134 | role = aws_iam_role.node_groups[0].name 135 | } 136 | 137 | resource "aws_iam_policy" "amazon_eks_worker_node_autoscaler_policy" { 138 | count = var.enabled && var.external_cluster == false ? 1 : 0 139 | name = "${module.labels.id}-autoscaler" 140 | path = "/" 141 | policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy[0].json 142 | } 143 | 144 | resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_autoscaler_policy" { 145 | count = var.enabled && var.external_cluster == false ? 1 : 0 146 | policy_arn = aws_iam_policy.amazon_eks_worker_node_autoscaler_policy[0].arn 147 | role = aws_iam_role.node_groups[0].name 148 | } 149 | 150 | resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" { 151 | count = var.enabled && var.external_cluster == false ? 1 : 0 152 | policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEKSWorkerNodePolicy") 153 | role = aws_iam_role.node_groups[0].name 154 | } 155 | 156 | data "aws_iam_policy_document" "node_group" { 157 | count = var.enabled && var.external_cluster == false ? 
1 : 0 158 | 159 |   statement { 160 |     effect = "Allow" 161 |     actions = [ 162 |       "sts:AssumeRole", 163 |       "sts:TagSession", 164 |     ] 165 | 166 |     principals { 167 |       type        = "Service" 168 |       identifiers = ["ec2.amazonaws.com"] 169 |     } 170 |   } 171 | } 172 | 173 | # Autoscaler policy for node group (actions required by cluster-autoscaler) 174 | data "aws_iam_policy_document" "amazon_eks_node_group_autoscaler_policy" { 175 |   count = var.enabled && var.external_cluster == false ? 1 : 0 176 | 177 |   statement { 178 |     effect = "Allow" 179 |     actions = [ 180 |       "autoscaling:DescribeAutoScalingGroups", 181 |       "autoscaling:DescribeAutoScalingInstances", 182 |       "autoscaling:DescribeLaunchConfigurations", 183 |       "autoscaling:DescribeTags", 184 |       "autoscaling:SetDesiredCapacity", 185 |       "autoscaling:TerminateInstanceInAutoScalingGroup", 186 |       # FIX: removed duplicate "autoscaling:TerminateInstanceInAutoScalingGroup" entry 187 |       "ec2:DescribeLaunchTemplateVersions", 188 |       "ecr:*" # NOTE(review): "ecr:*" is broader than the autoscaler needs — consider scoping to pull-only ECR actions 189 |     ] 190 |     resources = ["*"] 191 |   } 192 | } 193 | 194 | #Module      : IAM INSTANCE PROFILE 195 | #Description : Provides an IAM instance profile. 196 | resource "aws_iam_instance_profile" "default" { 197 |   count = var.enabled && var.external_cluster == false ? 1 : 0 198 |   name  = format("%s-instance-profile", module.labels.id) 199 |   role  = aws_iam_role.node_groups[0].name 200 | } 201 | 202 | 203 | 204 | ################################################################################ 205 | # EKS Auto Node IAM Role 206 | ################################################################################ 207 | 208 | data "aws_iam_policy_document" "node_assume_role_policy" { 209 |   count = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 
1 : 0 210 | 211 | statement { 212 | sid = "EKSAutoNodeAssumeRole" 213 | actions = [ 214 | "sts:AssumeRole", 215 | "sts:TagSession", 216 | ] 217 | 218 | principals { 219 | type = "Service" 220 | identifiers = ["ec2.amazonaws.com"] 221 | } 222 | } 223 | } 224 | 225 | resource "aws_iam_role" "eks_auto" { 226 | count = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 1 : 0 227 | 228 | name = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name 229 | name_prefix = var.node_iam_role_use_name_prefix ? "${local.node_iam_role_name}-" : null 230 | path = var.node_iam_role_path 231 | description = var.node_iam_role_description 232 | 233 | assume_role_policy = data.aws_iam_policy_document.node_assume_role_policy[0].json 234 | permissions_boundary = var.node_iam_role_permissions_boundary 235 | force_detach_policies = true 236 | 237 | tags = merge(var.tags, var.node_iam_role_tags) 238 | } 239 | 240 | # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html 241 | resource "aws_iam_role_policy_attachment" "eks_auto" { 242 | for_each = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? { 243 | AmazonEKSWorkerNodeMinimalPolicy = "${local.aws_policy_prefix}/AmazonEKSWorkerNodeMinimalPolicy", 244 | AmazonEC2ContainerRegistryPullOnly = "${local.aws_policy_prefix}/AmazonEC2ContainerRegistryPullOnly", 245 | } : {} 246 | 247 | policy_arn = each.value 248 | role = aws_iam_role.eks_auto[0].name 249 | } 250 | 251 | resource "aws_iam_role_policy_attachment" "eks_auto_additional" { 252 | for_each = var.enabled && var.external_cluster == false && length(var.cluster_compute_config) > 0 ? 
var.node_iam_role_additional_policies : {} 253 | 254 | policy_arn = each.value 255 | role = aws_iam_role.eks_auto[0].name 256 | } 257 | 258 | -------------------------------------------------------------------------------- /examples/self_managed/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | locals { 5 | name = "clouddrove-eks" 6 | region = "eu-west-1" 7 | vpc_cidr_block = module.vpc.vpc_cidr_block 8 | additional_cidr_block = "172.16.0.0/16" 9 | environment = "test" 10 | label_order = ["name", "environment"] 11 | tags = { 12 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 13 | } 14 | } 15 | 16 | ################################################################################ 17 | # VPC module call 18 | ################################################################################ 19 | module "vpc" { 20 | source = "clouddrove/vpc/aws" 21 | version = "2.0.0" 22 | 23 | name = "${local.name}-vpc" 24 | environment = local.environment 25 | cidr_block = "10.10.0.0/16" 26 | } 27 | 28 | ################################################################################ 29 | # Subnets 30 | ################################################################################ 31 | module "subnets" { 32 | source = "clouddrove/subnet/aws" 33 | version = "2.0.0" 34 | 35 | name = "${local.name}-subnets" 36 | environment = local.environment 37 | nat_gateway_enabled = true 38 | availability_zones = ["${local.region}a", "${local.region}b"] 39 | vpc_id = module.vpc.vpc_id 40 | cidr_block = module.vpc.vpc_cidr_block 41 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 42 | type = "public-private" 43 | igw_id = module.vpc.igw_id 44 | 45 | extra_public_tags = { 46 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 47 | "kubernetes.io/role/elb" = "1" 48 | } 49 | 50 | extra_private_tags = { 51 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 52 | 
"kubernetes.io/role/internal-elb" = "1" 53 | } 54 | 55 | public_inbound_acl_rules = [ 56 | { 57 | rule_number = 100 58 | rule_action = "allow" 59 | from_port = 0 60 | to_port = 0 61 | protocol = "-1" 62 | cidr_block = "0.0.0.0/0" 63 | }, 64 | { 65 | rule_number = 101 66 | rule_action = "allow" 67 | from_port = 0 68 | to_port = 0 69 | protocol = "-1" 70 | ipv6_cidr_block = "::/0" 71 | }, 72 | ] 73 | public_outbound_acl_rules = [ 74 | { 75 | rule_number = 100 76 | rule_action = "allow" 77 | from_port = 0 78 | to_port = 0 79 | protocol = "-1" 80 | cidr_block = "0.0.0.0/0" 81 | }, 82 | { 83 | rule_number = 101 84 | rule_action = "allow" 85 | from_port = 0 86 | to_port = 0 87 | protocol = "-1" 88 | ipv6_cidr_block = "::/0" 89 | }, 90 | ] 91 | private_inbound_acl_rules = [ 92 | { 93 | rule_number = 100 94 | rule_action = "allow" 95 | from_port = 0 96 | to_port = 0 97 | protocol = "-1" 98 | cidr_block = "0.0.0.0/0" 99 | }, 100 | { 101 | rule_number = 101 102 | rule_action = "allow" 103 | from_port = 0 104 | to_port = 0 105 | protocol = "-1" 106 | ipv6_cidr_block = "::/0" 107 | }, 108 | ] 109 | private_outbound_acl_rules = [ 110 | { 111 | rule_number = 100 112 | rule_action = "allow" 113 | from_port = 0 114 | to_port = 0 115 | protocol = "-1" 116 | cidr_block = "0.0.0.0/0" 117 | }, 118 | { 119 | rule_number = 101 120 | rule_action = "allow" 121 | from_port = 0 122 | to_port = 0 123 | protocol = "-1" 124 | ipv6_cidr_block = "::/0" 125 | }, 126 | ] 127 | } 128 | 129 | # ################################################################################ 130 | # Security Groups 131 | ################################################################################ 132 | 133 | module "ssh" { 134 | source = "clouddrove/security-group/aws" 135 | version = "2.0.0" 136 | 137 | name = "${local.name}-ssh" 138 | environment = local.environment 139 | vpc_id = module.vpc.vpc_id 140 | new_sg_ingress_rules_with_cidr_blocks = [{ 141 | rule_count = 1 142 | from_port = 22 143 | protocol = "tcp" 
144 |     to_port     = 22 145 |     cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 146 |     description = "Allow ssh traffic." 147 |   }, 148 |     { 149 |       rule_count  = 2 150 |       from_port   = 27017 151 |       protocol    = "tcp" 152 |       to_port     = 27017 153 |       cidr_blocks = [local.additional_cidr_block] 154 |       description = "Allow Mongodb traffic." 155 |     } 156 |   ] 157 |   ## EGRESS Rules 158 |   new_sg_egress_rules_with_cidr_blocks = [{ 159 |     rule_count  = 1 160 |     from_port   = 22 161 |     protocol    = "tcp" 162 |     to_port     = 22 163 |     cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 164 |     description = "Allow ssh outbound traffic." 165 |   }, 166 |     { 167 |       rule_count  = 2 168 |       from_port   = 27017 169 |       protocol    = "tcp" 170 |       to_port     = 27017 171 |       cidr_blocks = [local.additional_cidr_block] 172 |       description = "Allow Mongodb outbound traffic." 173 |   }] 174 | } 175 | 176 | module "http_https" { 177 |   source  = "clouddrove/security-group/aws" 178 |   version = "2.0.0" 179 | 180 |   name        = "${local.name}-http-https" 181 |   environment = local.environment 182 |   vpc_id      = module.vpc.vpc_id 183 |   ## INGRESS Rules 184 |   new_sg_ingress_rules_with_cidr_blocks = [{ 185 |     rule_count  = 1 186 |     from_port   = 22 187 |     protocol    = "tcp" 188 |     to_port     = 22 189 |     cidr_blocks = [module.vpc.vpc_cidr_block] 190 |     description = "Allow ssh traffic." 191 |   }, 192 |     { 193 |       rule_count  = 2 194 |       from_port   = 80 195 |       protocol    = "tcp" # FIX: was "http" — SG rules take IP protocols (tcp/udp/icmp/-1), not application protocols 196 |       to_port     = 80 197 |       cidr_blocks = [module.vpc.vpc_cidr_block] 198 |       description = "Allow http traffic." 199 |     }, 200 |     { 201 |       rule_count  = 3 202 |       from_port   = 443 203 |       protocol    = "tcp" # FIX: was "https" — SG rules take IP protocols (tcp/udp/icmp/-1) 204 |       to_port     = 443 205 |       cidr_blocks = [module.vpc.vpc_cidr_block] 206 |       description = "Allow https traffic." 
207 | } 208 | ] 209 | 210 | ## EGRESS Rules 211 | new_sg_egress_rules_with_cidr_blocks = [{ 212 | rule_count = 1 213 | from_port = 0 214 | protocol = "-1" 215 | to_port = 0 216 | cidr_blocks = ["0.0.0.0/0"] 217 | ipv6_cidr_blocks = ["::/0"] 218 | description = "Allow all traffic." 219 | } 220 | ] 221 | } 222 | 223 | ################################################################################ 224 | # EKS Module call 225 | ################################################################################ 226 | module "eks" { 227 | source = "../.." 228 | 229 | name = local.name 230 | environment = "test" 231 | 232 | # EKS 233 | kubernetes_version = "1.32" 234 | endpoint_private_access = true 235 | endpoint_public_access = true 236 | # Networking 237 | vpc_id = module.vpc.vpc_id 238 | subnet_ids = module.subnets.private_subnet_id 239 | allowed_security_groups = [module.ssh.security_group_id] 240 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 241 | allowed_cidr_blocks = [local.vpc_cidr_block] 242 | 243 | # Self Managed Node Grou 244 | # Node Groups Defaults Values It will Work all Node Groups 245 | self_node_group_defaults = { 246 | subnet_ids = module.subnets.private_subnet_id 247 | propagate_tags = [{ 248 | key = "aws-node-termination-handler/managed" 249 | value = true 250 | propagate_at_launch = true 251 | }, 252 | { 253 | key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" 254 | value = "owned" 255 | propagate_at_launch = true 256 | 257 | } 258 | ] 259 | 260 | block_device_mappings = { 261 | xvda = { 262 | device_name = "/dev/xvda" 263 | ebs = { 264 | volume_size = 50 265 | volume_type = "gp3" 266 | iops = 3000 267 | throughput = 150 268 | } 269 | } 270 | } 271 | } 272 | 273 | self_node_groups = { 274 | critical = { 275 | name = "${module.eks.cluster_name}-critical" 276 | min_size = 1 277 | max_size = 7 278 | desired_size = 1 279 | bootstrap_extra_args = 
"--kubelet-extra-args '--max-pods=110'" 280 |       instance_type        = "t3.medium" 281 |     } 282 |     application = { 283 |       name = "${module.eks.cluster_name}-application" 284 |       instance_market_options = { 285 |         market_type = "spot" 286 |       } 287 |       min_size             = 1 288 |       max_size             = 7 289 |       desired_size         = 1 290 |       bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" 291 |       instance_type        = "t3.medium" 292 |     } 293 |   } 294 |   # Schedule Self Managed Auto Scaling node group 295 |   schedules = { 296 |     scale-up = { 297 |       min_size     = 2 298 |       max_size     = 2 299 |       desired_size = 2 300 |       start_time   = "2023-08-15T19:00:00Z" 301 |       end_time     = "2023-08-19T19:00:00Z" 302 |       timezone     = "Europe/Amsterdam" 303 |       recurrence   = "0 7 * * 1" 304 |     }, 305 |     scale-down = { 306 |       min_size     = 0 307 |       max_size     = 0 308 |       desired_size = 0 309 |       start_time   = "2023-08-12T12:00:00Z" 310 |       end_time     = "2024-03-05T12:00:00Z" 311 |       timezone     = "Europe/Amsterdam" 312 |       recurrence   = "0 7 * * 5" 313 |     } 314 |   } 315 | } 316 | # Kubernetes provider configuration 317 | data "aws_eks_cluster" "this" { 318 |   name = module.eks.cluster_id 319 | } 320 | 321 | data "aws_eks_cluster_auth" "this" { 322 |   name = module.eks.cluster_id # FIX: was module.eks.cluster_certificate_authority_data — aws_eks_cluster_auth takes the cluster name, not CA data 323 | } 324 | # 325 | provider "kubernetes" { 326 |   host                   = data.aws_eks_cluster.this.endpoint 327 |   cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 328 |   token                  = data.aws_eks_cluster_auth.this.token 329 | } -------------------------------------------------------------------------------- /examples/aws_managed/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 |   region = local.region 3 | } 4 | 5 | locals { 6 |   name                  = "clouddrove-eks" 7 |   region                = "us-east-1" 8 |   vpc_cidr_block        = module.vpc.vpc_cidr_block 9 |   additional_cidr_block = "172.16.0.0/16" 10 |   environment           = "test" 11 |   label_order           = 
["name", "environment"] 12 | tags = { 13 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 14 | } 15 | } 16 | 17 | ################################################################################ 18 | # VPC module call 19 | ################################################################################ 20 | module "vpc" { 21 | source = "clouddrove/vpc/aws" 22 | version = "2.0.0" 23 | 24 | name = "${local.name}-vpc" 25 | environment = local.environment 26 | cidr_block = "10.10.0.0/16" 27 | } 28 | 29 | # ################################################################################ 30 | # # Subnets moudle call 31 | # ################################################################################ 32 | 33 | module "subnets" { 34 | source = "clouddrove/subnet/aws" 35 | version = "2.0.0" 36 | 37 | name = "${local.name}-subnet" 38 | environment = local.environment 39 | nat_gateway_enabled = true 40 | single_nat_gateway = true 41 | availability_zones = ["${local.region}a", "${local.region}b", "${local.region}c"] 42 | vpc_id = module.vpc.vpc_id 43 | type = "public-private" 44 | igw_id = module.vpc.igw_id 45 | cidr_block = local.vpc_cidr_block 46 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 47 | enable_ipv6 = false 48 | 49 | extra_public_tags = { 50 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 51 | "kubernetes.io/role/elb" = "1" 52 | } 53 | 54 | extra_private_tags = { 55 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 56 | "kubernetes.io/role/internal-elb" = "1" 57 | } 58 | 59 | public_inbound_acl_rules = [ 60 | { 61 | rule_number = 100 62 | rule_action = "allow" 63 | from_port = 0 64 | to_port = 0 65 | protocol = "-1" 66 | cidr_block = "0.0.0.0/0" 67 | }, 68 | { 69 | rule_number = 101 70 | rule_action = "allow" 71 | from_port = 0 72 | to_port = 0 73 | protocol = "-1" 74 | ipv6_cidr_block = "::/0" 75 | }, 76 | ] 77 | 78 | public_outbound_acl_rules = [ 79 | { 80 | rule_number = 100 81 | rule_action = "allow" 82 | from_port = 
0 83 | to_port = 0 84 | protocol = "-1" 85 | cidr_block = "0.0.0.0/0" 86 | }, 87 | { 88 | rule_number = 101 89 | rule_action = "allow" 90 | from_port = 0 91 | to_port = 0 92 | protocol = "-1" 93 | ipv6_cidr_block = "::/0" 94 | }, 95 | ] 96 | 97 | private_inbound_acl_rules = [ 98 | { 99 | rule_number = 100 100 | rule_action = "allow" 101 | from_port = 0 102 | to_port = 0 103 | protocol = "-1" 104 | cidr_block = "0.0.0.0/0" 105 | }, 106 | { 107 | rule_number = 101 108 | rule_action = "allow" 109 | from_port = 0 110 | to_port = 0 111 | protocol = "-1" 112 | ipv6_cidr_block = "::/0" 113 | }, 114 | ] 115 | 116 | private_outbound_acl_rules = [ 117 | { 118 | rule_number = 100 119 | rule_action = "allow" 120 | from_port = 0 121 | to_port = 0 122 | protocol = "-1" 123 | cidr_block = "0.0.0.0/0" 124 | }, 125 | { 126 | rule_number = 101 127 | rule_action = "allow" 128 | from_port = 0 129 | to_port = 0 130 | protocol = "-1" 131 | ipv6_cidr_block = "::/0" 132 | }, 133 | ] 134 | } 135 | 136 | # ################################################################################ 137 | # Security Groups module call 138 | ################################################################################ 139 | 140 | module "ssh" { 141 | source = "clouddrove/security-group/aws" 142 | version = "2.0.0" 143 | 144 | name = "${local.name}-ssh" 145 | environment = local.environment 146 | vpc_id = module.vpc.vpc_id 147 | new_sg_ingress_rules_with_cidr_blocks = [{ 148 | rule_count = 1 149 | from_port = 22 150 | protocol = "tcp" 151 | to_port = 22 152 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 153 | description = "Allow ssh traffic." 154 | }, 155 | { 156 | rule_count = 2 157 | from_port = 27017 158 | protocol = "tcp" 159 | to_port = 27017 160 | cidr_blocks = [local.additional_cidr_block] 161 | description = "Allow Mongodb traffic." 
162 | } 163 | ] 164 | 165 | ## EGRESS Rules 166 | new_sg_egress_rules_with_cidr_blocks = [{ 167 | rule_count = 1 168 | from_port = 22 169 | protocol = "tcp" 170 | to_port = 22 171 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 172 | description = "Allow ssh outbound traffic." 173 | }, 174 | { 175 | rule_count = 2 176 | from_port = 27017 177 | protocol = "tcp" 178 | to_port = 27017 179 | cidr_blocks = [local.additional_cidr_block] 180 | description = "Allow Mongodb outbound traffic." 181 | }] 182 | } 183 | 184 | module "http_https" { 185 | source = "clouddrove/security-group/aws" 186 | version = "2.0.0" 187 | 188 | name = "${local.name}-http-https" 189 | environment = local.environment 190 | 191 | vpc_id = module.vpc.vpc_id 192 | ## INGRESS Rules 193 | new_sg_ingress_rules_with_cidr_blocks = [{ 194 | rule_count = 1 195 | from_port = 22 196 | protocol = "tcp" 197 | to_port = 22 198 | cidr_blocks = [local.vpc_cidr_block] 199 | description = "Allow ssh traffic." 200 | }, 201 | { 202 | rule_count = 2 203 | from_port = 80 204 | protocol = "tcp" 205 | to_port = 80 206 | cidr_blocks = [local.vpc_cidr_block] 207 | description = "Allow http traffic." 208 | }, 209 | { 210 | rule_count = 3 211 | from_port = 443 212 | protocol = "tcp" 213 | to_port = 443 214 | cidr_blocks = [local.vpc_cidr_block] 215 | description = "Allow https traffic." 216 | } 217 | ] 218 | 219 | ## EGRESS Rules 220 | new_sg_egress_rules_with_cidr_blocks = [{ 221 | rule_count = 1 222 | from_port = 0 223 | protocol = "-1" 224 | to_port = 0 225 | cidr_blocks = ["0.0.0.0/0"] 226 | ipv6_cidr_blocks = ["::/0"] 227 | description = "Allow all traffic." 
228 | } 229 | ] 230 | } 231 | 232 | ################################################################################ 233 | # KMS Module call 234 | ################################################################################ 235 | module "kms" { 236 | source = "clouddrove/kms/aws" 237 | version = "1.3.0" 238 | 239 | name = "${local.name}-kms" 240 | environment = local.environment 241 | label_order = local.label_order 242 | enabled = true 243 | description = "KMS key for EBS of EKS nodes" 244 | enable_key_rotation = true # rotate yearly per AWS KMS best practice (matches the fargate example) 245 | policy = data.aws_iam_policy_document.kms.json 246 | } 247 | 248 | data "aws_iam_policy_document" "kms" { 249 | version = "2012-10-17" 250 | statement { 251 | sid = "Enable IAM User Permissions" 252 | effect = "Allow" 253 | principals { 254 | type = "AWS" 255 | identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] 256 | } 257 | actions = ["kms:*"] 258 | resources = ["*"] 259 | } 260 | } 261 | 262 | data "aws_caller_identity" "current" {} 263 | 264 | ################################################################################ 265 | # EKS Module call 266 | ################################################################################ 267 | module "eks" { 268 | source = "../.." 
269 | enabled = true 270 | 271 | name = local.name 272 | environment = local.environment 273 | label_order = local.label_order 274 | 275 | # EKS 276 | kubernetes_version = "1.32" 277 | endpoint_public_access = true 278 | # Networking 279 | vpc_id = module.vpc.vpc_id 280 | subnet_ids = module.subnets.private_subnet_id 281 | allowed_security_groups = [module.ssh.security_group_id] 282 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 283 | allowed_cidr_blocks = [local.vpc_cidr_block] 284 | 285 | # AWS Managed Node Group 286 | # Node Groups Defaults Values It will Work all Node Groups 287 | managed_node_group_defaults = { 288 | subnet_ids = module.subnets.private_subnet_id 289 | nodes_additional_security_group_ids = [module.ssh.security_group_id] 290 | tags = { 291 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 292 | "k8s.io/cluster/${module.eks.cluster_name}" = "shared" 293 | } 294 | block_device_mappings = { 295 | xvda = { 296 | device_name = "/dev/xvda" 297 | ebs = { 298 | volume_size = 50 299 | volume_type = "gp3" 300 | iops = 3000 301 | throughput = 150 302 | encrypted = true 303 | kms_key_id = module.kms.key_arn 304 | } 305 | } 306 | } 307 | } 308 | managed_node_group = { 309 | critical = { 310 | name = "${module.eks.cluster_name}-critical" 311 | capacity_type = "ON_DEMAND" 312 | min_size = 1 313 | max_size = 2 314 | desired_size = 2 315 | instance_types = ["t3.medium"] 316 | ami_type = "BOTTLEROCKET_x86_64" 317 | } 318 | 319 | application = { 320 | name = "${module.eks.cluster_name}-application" 321 | capacity_type = "SPOT" 322 | min_size = 1 323 | max_size = 2 324 | desired_size = 1 325 | force_update_version = true 326 | instance_types = ["t3.medium"] 327 | ami_type = "BOTTLEROCKET_x86_64" 328 | } 329 | } 330 | 331 | apply_config_map_aws_auth = true 332 | map_additional_iam_users = [ 333 | { 334 | userarn = "arn:aws:iam::123456789:user/hello@clouddrove.com" 335 | username = 
"hello@clouddrove.com" 336 | groups = ["system:masters"] 337 | } 338 | ] 339 | } 340 | ## Kubernetes provider configuration 341 | data "aws_eks_cluster" "this" { 342 | depends_on = [module.eks] 343 | name = module.eks.cluster_id 344 | } 345 | 346 | data "aws_eks_cluster_auth" "this" { 347 | depends_on = [module.eks] 348 | name = module.eks.cluster_id 349 | } 350 | 351 | provider "kubernetes" { 352 | host = data.aws_eks_cluster.this.endpoint 353 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 354 | token = data.aws_eks_cluster_auth.this.token 355 | } 356 | -------------------------------------------------------------------------------- /node_group/aws_managed/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" {} 2 | 3 | data "aws_caller_identity" "current" {} 4 | 5 | #Module : label 6 | #Description : Terraform module to create consistent naming for multiple names. 7 | module "labels" { 8 | source = "clouddrove/labels/aws" 9 | version = "1.3.0" 10 | 11 | name = var.name 12 | repository = var.repository 13 | environment = var.environment 14 | managedby = var.managedby 15 | extra_tags = var.tags 16 | attributes = compact(concat(var.attributes, ["nodes"])) 17 | label_order = var.label_order 18 | } 19 | 20 | 21 | ################################################################################ 22 | # Launch template 23 | ################################################################################ 24 | 25 | 26 | 27 | resource "aws_launch_template" "this" { 28 | count = var.enabled ? 
1 : 0 29 | name = module.labels.id 30 | description = var.launch_template_description 31 | 32 | ebs_optimized = var.ebs_optimized 33 | image_id = var.ami_id 34 | # # Set on node group instead 35 | # instance_type = var.launch_template_instance_type 36 | key_name = var.key_name 37 | user_data = var.before_cluster_joining_userdata 38 | vpc_security_group_ids = var.vpc_security_group_ids 39 | 40 | disable_api_termination = var.disable_api_termination 41 | kernel_id = var.kernel_id 42 | ram_disk_id = var.ram_disk_id 43 | 44 | dynamic "block_device_mappings" { 45 | for_each = var.block_device_mappings 46 | content { 47 | device_name = block_device_mappings.value.device_name 48 | no_device = lookup(block_device_mappings.value, "no_device", null) 49 | virtual_name = lookup(block_device_mappings.value, "virtual_name", null) 50 | 51 | dynamic "ebs" { 52 | for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) 53 | content { 54 | delete_on_termination = true 55 | encrypted = true 56 | kms_key_id = var.kms_key_id 57 | iops = lookup(ebs.value, "iops", null) 58 | throughput = lookup(ebs.value, "throughput", null) 59 | snapshot_id = lookup(ebs.value, "snapshot_id", null) 60 | volume_size = lookup(ebs.value, "volume_size", null) 61 | volume_type = lookup(ebs.value, "volume_type", null) 62 | } 63 | } 64 | } 65 | } 66 | 67 | dynamic "capacity_reservation_specification" { 68 | for_each = var.capacity_reservation_specification != null ? 
[var.capacity_reservation_specification] : [] 69 | content { 70 | capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) 71 | 72 | dynamic "capacity_reservation_target" { 73 | for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", []) 74 | content { 75 | capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) 76 | } 77 | } 78 | } 79 | } 80 | 81 | dynamic "cpu_options" { 82 | for_each = var.cpu_options != null ? [var.cpu_options] : [] 83 | content { 84 | core_count = cpu_options.value.core_count 85 | threads_per_core = cpu_options.value.threads_per_core 86 | } 87 | } 88 | 89 | dynamic "credit_specification" { 90 | for_each = var.credit_specification != null ? [var.credit_specification] : [] 91 | content { 92 | cpu_credits = credit_specification.value.cpu_credits 93 | } 94 | } 95 | 96 | dynamic "enclave_options" { 97 | for_each = var.enclave_options != null ? [var.enclave_options] : [] 98 | content { 99 | enabled = enclave_options.value.enabled 100 | } 101 | } 102 | 103 | dynamic "license_specification" { 104 | for_each = var.license_specifications != null ? [var.license_specifications] : [] 105 | content { 106 | license_configuration_arn = license_specifications.value.license_configuration_arn 107 | } 108 | } 109 | 110 | dynamic "metadata_options" { 111 | for_each = var.metadata_options != null ? 
[var.metadata_options] : [] 112 | content { 113 | http_endpoint = lookup(metadata_options.value, "http_endpoint", null) 114 | http_tokens = lookup(metadata_options.value, "http_tokens", null) 115 | http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) 116 | http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) 117 | instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) 118 | } 119 | } 120 | 121 | dynamic "monitoring" { 122 | for_each = var.enable_monitoring != null ? [1] : [] 123 | content { 124 | enabled = var.enable_monitoring 125 | } 126 | } 127 | 128 | dynamic "network_interfaces" { 129 | for_each = var.network_interfaces 130 | content { 131 | associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) 132 | associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) 133 | delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) 134 | description = lookup(network_interfaces.value, "description", null) 135 | device_index = lookup(network_interfaces.value, "device_index", null) 136 | ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : [] 137 | ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) 138 | ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : [] 139 | ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) 140 | network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) 141 | private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) 142 | security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? 
network_interfaces.value.security_groups : [] 143 | # Set on EKS managed node group, will fail if set here 144 | # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics 145 | # subnet_id = lookup(network_interfaces.value, "subnet_id", null) 146 | } 147 | } 148 | 149 | dynamic "placement" { 150 | for_each = var.placement != null ? [var.placement] : [] 151 | content { 152 | affinity = lookup(placement.value, "affinity", null) 153 | availability_zone = lookup(placement.value, "availability_zone", null) 154 | group_name = lookup(placement.value, "group_name", null) 155 | host_id = lookup(placement.value, "host_id", null) 156 | spread_domain = lookup(placement.value, "spread_domain", null) 157 | tenancy = lookup(placement.value, "tenancy", null) 158 | partition_number = lookup(placement.value, "partition_number", null) 159 | } 160 | } 161 | 162 | dynamic "tag_specifications" { 163 | for_each = toset(["instance", "volume", "network-interface"]) 164 | content { 165 | resource_type = tag_specifications.key 166 | tags = merge( 167 | module.labels.tags, 168 | { Name = module.labels.id }) 169 | } 170 | } 171 | 172 | 173 | lifecycle { 174 | create_before_destroy = true 175 | } 176 | 177 | tags = module.labels.tags 178 | } 179 | 180 | ################################################################################ 181 | # Node Group 182 | ################################################################################ 183 | 184 | resource "aws_eks_node_group" "this" { 185 | count = var.enabled ? 
1 : 0 186 | 187 | # Required 188 | cluster_name = var.cluster_name 189 | node_role_arn = var.iam_role_arn 190 | subnet_ids = var.subnet_ids 191 | 192 | scaling_config { 193 | min_size = var.min_size 194 | max_size = var.max_size 195 | desired_size = var.desired_size 196 | } 197 | 198 | # Optional 199 | node_group_name = module.labels.id 200 | 201 | # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami 202 | ami_type = var.ami_id != "" ? null : var.ami_type 203 | release_version = var.ami_id != "" ? null : var.ami_release_version 204 | version = var.ami_id != "" ? null : var.cluster_version 205 | 206 | capacity_type = var.capacity_type 207 | disk_size = var.disk_size 208 | force_update_version = var.force_update_version 209 | instance_types = var.instance_types 210 | labels = var.labels 211 | 212 | dynamic "launch_template" { 213 | for_each = var.enabled ? [1] : [] 214 | content { 215 | # Direct references: the template is created whenever the node group is (same count guard); single-argument try() is a no-op. 216 | name = aws_launch_template.this[0].name 217 | version = aws_launch_template.this[0].latest_version 218 | } 219 | } 220 | 221 | dynamic "remote_access" { 222 | for_each = length(var.remote_access) > 0 ? [var.remote_access] : [] 223 | content { 224 | ec2_ssh_key = try(remote_access.value.ec2_ssh_key, null) 225 | source_security_group_ids = try(remote_access.value.source_security_group_ids, []) 226 | } 227 | } 228 | 229 | dynamic "taint" { 230 | for_each = var.taints 231 | content { 232 | key = taint.value.key 233 | value = lookup(taint.value, "value", null) # taint value is optional in EKS; default avoids a lookup() error when omitted 234 | effect = taint.value.effect 235 | } 236 | } 237 | 238 | dynamic "update_config" { 239 | for_each = length(var.update_config) > 0 ? 
[var.update_config] : [] 239 | content { 240 | max_unavailable_percentage = try(update_config.value.max_unavailable_percentage, null) 241 | max_unavailable = try(update_config.value.max_unavailable, null) 242 | } 243 | } 244 | 245 | timeouts { 246 | create = lookup(var.timeouts, "create", null) 247 | update = lookup(var.timeouts, "update", null) 248 | delete = lookup(var.timeouts, "delete", null) 249 | } 250 | 251 | lifecycle { 252 | create_before_destroy = true 253 | ignore_changes = [ 254 | scaling_config[0].desired_size, 255 | ] 256 | } 257 | 258 | tags = module.labels.tags 259 | } 260 | 261 | #-----------------------------------------------ASG-Schedule---------------------------------------------------------------- 262 | 263 | resource "aws_autoscaling_schedule" "this" { 264 | for_each = var.enabled && var.create_schedule ? var.schedules : {} 265 | 266 | scheduled_action_name = each.key 267 | autoscaling_group_name = aws_eks_node_group.this[0].resources[0].autoscaling_groups[0].name 268 | 269 | min_size = lookup(each.value, "min_size", null) 270 | max_size = lookup(each.value, "max_size", null) 271 | desired_capacity = lookup(each.value, "desired_size", null) 272 | start_time = lookup(each.value, "start_time", null) 273 | end_time = lookup(each.value, "end_time", null) 274 | time_zone = lookup(each.value, "time_zone", null) 275 | 276 | # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] 277 | # Cron examples: https://crontab.guru/examples.html 278 | recurrence = lookup(each.value, "recurrence", null) 279 | } -------------------------------------------------------------------------------- /examples/aws_managed_with_fargate/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | 5 | locals { 6 | name = "clouddrove-eks" 7 | region = "eu-west-1" 8 | vpc_cidr_block = module.vpc.vpc_cidr_block 9 | additional_cidr_block = "172.16.0.0/16" 10 | environment = "test" 
11 | label_order = ["name", "environment"] 12 | tags = { 13 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 14 | } 15 | } 16 | 17 | ################################################################################ 18 | # VPC module call 19 | ################################################################################ 20 | module "vpc" { 21 | source = "clouddrove/vpc/aws" 22 | version = "2.0.0" 23 | 24 | name = "${local.name}-vpc" 25 | environment = local.environment 26 | cidr_block = "10.10.0.0/16" 27 | } 28 | 29 | # ################################################################################ 30 | # # Subnets moudle call 31 | # ################################################################################ 32 | 33 | module "subnets" { 34 | source = "clouddrove/subnet/aws" 35 | version = "2.0.0" 36 | 37 | name = "${local.name}-subnet" 38 | environment = local.environment 39 | nat_gateway_enabled = true 40 | single_nat_gateway = true 41 | availability_zones = ["${local.region}a", "${local.region}b", "${local.region}c"] 42 | vpc_id = module.vpc.vpc_id 43 | type = "public-private" 44 | igw_id = module.vpc.igw_id 45 | cidr_block = local.vpc_cidr_block 46 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 47 | enable_ipv6 = false 48 | 49 | extra_public_tags = { 50 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 51 | "kubernetes.io/role/elb" = "1" 52 | } 53 | 54 | extra_private_tags = { 55 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 56 | "kubernetes.io/role/internal-elb" = "1" 57 | } 58 | 59 | public_inbound_acl_rules = [ 60 | { 61 | rule_number = 100 62 | rule_action = "allow" 63 | from_port = 0 64 | to_port = 0 65 | protocol = "-1" 66 | cidr_block = "0.0.0.0/0" 67 | }, 68 | { 69 | rule_number = 101 70 | rule_action = "allow" 71 | from_port = 0 72 | to_port = 0 73 | protocol = "-1" 74 | ipv6_cidr_block = "::/0" 75 | }, 76 | ] 77 | 78 | public_outbound_acl_rules = [ 79 | { 80 | rule_number = 100 81 | rule_action = 
"allow" 82 | from_port = 0 83 | to_port = 0 84 | protocol = "-1" 85 | cidr_block = "0.0.0.0/0" 86 | }, 87 | { 88 | rule_number = 101 89 | rule_action = "allow" 90 | from_port = 0 91 | to_port = 0 92 | protocol = "-1" 93 | ipv6_cidr_block = "::/0" 94 | }, 95 | ] 96 | 97 | private_inbound_acl_rules = [ 98 | { 99 | rule_number = 100 100 | rule_action = "allow" 101 | from_port = 0 102 | to_port = 0 103 | protocol = "-1" 104 | cidr_block = "0.0.0.0/0" 105 | }, 106 | { 107 | rule_number = 101 108 | rule_action = "allow" 109 | from_port = 0 110 | to_port = 0 111 | protocol = "-1" 112 | ipv6_cidr_block = "::/0" 113 | }, 114 | ] 115 | 116 | private_outbound_acl_rules = [ 117 | { 118 | rule_number = 100 119 | rule_action = "allow" 120 | from_port = 0 121 | to_port = 0 122 | protocol = "-1" 123 | cidr_block = "0.0.0.0/0" 124 | }, 125 | { 126 | rule_number = 101 127 | rule_action = "allow" 128 | from_port = 0 129 | to_port = 0 130 | protocol = "-1" 131 | ipv6_cidr_block = "::/0" 132 | }, 133 | ] 134 | } 135 | 136 | 137 | # ################################################################################ 138 | # Security Groups module call 139 | ################################################################################ 140 | 141 | module "ssh" { 142 | source = "clouddrove/security-group/aws" 143 | version = "2.0.0" 144 | 145 | name = "${local.name}-ssh" 146 | environment = local.environment 147 | vpc_id = module.vpc.vpc_id 148 | new_sg_ingress_rules_with_cidr_blocks = [{ 149 | rule_count = 1 150 | from_port = 22 151 | protocol = "tcp" 152 | to_port = 22 153 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 154 | description = "Allow ssh traffic." 155 | }, 156 | { 157 | rule_count = 2 158 | from_port = 27017 159 | protocol = "tcp" 160 | to_port = 27017 161 | cidr_blocks = [local.additional_cidr_block] 162 | description = "Allow Mongodb traffic." 
163 | } 164 | ] 165 | 166 | ## EGRESS Rules 167 | new_sg_egress_rules_with_cidr_blocks = [{ 168 | rule_count = 1 169 | from_port = 22 170 | protocol = "tcp" 171 | to_port = 22 172 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 173 | description = "Allow ssh outbound traffic." 174 | }, 175 | { 176 | rule_count = 2 177 | from_port = 27017 178 | protocol = "tcp" 179 | to_port = 27017 180 | cidr_blocks = [local.additional_cidr_block] 181 | description = "Allow Mongodb outbound traffic." 182 | }] 183 | } 184 | 185 | module "http_https" { 186 | source = "clouddrove/security-group/aws" 187 | version = "2.0.0" 188 | 189 | name = "${local.name}-http-https" 190 | environment = local.environment 191 | 192 | vpc_id = module.vpc.vpc_id 193 | ## INGRESS Rules 194 | new_sg_ingress_rules_with_cidr_blocks = [{ 195 | rule_count = 1 196 | from_port = 22 197 | protocol = "tcp" 198 | to_port = 22 199 | cidr_blocks = [local.vpc_cidr_block] 200 | description = "Allow ssh traffic." 201 | }, 202 | { 203 | rule_count = 2 204 | from_port = 80 205 | protocol = "tcp" 206 | to_port = 80 207 | cidr_blocks = [local.vpc_cidr_block] 208 | description = "Allow http traffic." 209 | }, 210 | { 211 | rule_count = 3 212 | from_port = 443 213 | protocol = "tcp" 214 | to_port = 443 215 | cidr_blocks = [local.vpc_cidr_block] 216 | description = "Allow https traffic." 217 | } 218 | ] 219 | 220 | ## EGRESS Rules 221 | new_sg_egress_rules_with_cidr_blocks = [{ 222 | rule_count = 1 223 | from_port = 0 224 | protocol = "-1" 225 | to_port = 0 226 | cidr_blocks = ["0.0.0.0/0"] 227 | ipv6_cidr_blocks = ["::/0"] 228 | description = "Allow all traffic." 
229 | } 230 | ] 231 | } 232 | 233 | ################################################################################ 234 | # KMS Module call 235 | ################################################################################ 236 | module "kms" { 237 | source = "clouddrove/kms/aws" 238 | version = "1.3.0" 239 | 240 | name = "${local.name}-kms" 241 | environment = local.environment 242 | label_order = local.label_order 243 | enabled = true 244 | description = "KMS key for EBS of EKS nodes" 245 | enable_key_rotation = true 246 | policy = data.aws_iam_policy_document.kms.json 247 | } 248 | 249 | data "aws_iam_policy_document" "kms" { 250 | version = "2012-10-17" 251 | statement { 252 | sid = "Enable IAM User Permissions" 253 | effect = "Allow" 254 | principals { 255 | type = "AWS" 256 | identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] 257 | } 258 | actions = ["kms:*"] 259 | resources = ["*"] 260 | } 261 | } 262 | 263 | data "aws_caller_identity" "current" {} 264 | 265 | ################################################################################ 266 | # EKS Module call 267 | ################################################################################ 268 | module "eks" { 269 | source = "../.." 
270 | enabled = true 271 | 272 | name = local.name 273 | environment = local.environment 274 | label_order = local.label_order 275 | 276 | # EKS 277 | kubernetes_version = "1.32" 278 | endpoint_public_access = true 279 | # Networking 280 | vpc_id = module.vpc.vpc_id 281 | subnet_ids = module.subnets.private_subnet_id 282 | allowed_security_groups = [module.ssh.security_group_id] 283 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 284 | allowed_cidr_blocks = [local.vpc_cidr_block] 285 | 286 | # AWS Managed Node Group 287 | # Node Groups Defaults Values It will Work all Node Groups 288 | managed_node_group_defaults = { 289 | subnet_ids = module.subnets.private_subnet_id 290 | nodes_additional_security_group_ids = [module.ssh.security_group_id] 291 | tags = { 292 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 293 | "k8s.io/cluster/${module.eks.cluster_name}" = "shared" 294 | } 295 | block_device_mappings = { 296 | xvda = { 297 | device_name = "/dev/xvda" 298 | ebs = { 299 | volume_size = 50 300 | volume_type = "gp3" 301 | iops = 3000 302 | throughput = 150 303 | encrypted = true 304 | kms_key_id = module.kms.key_arn 305 | } 306 | } 307 | } 308 | } 309 | managed_node_group = { 310 | critical = { 311 | name = "${module.eks.cluster_name}-critical" 312 | capacity_type = "SPOT" 313 | min_size = 1 314 | max_size = 2 315 | desired_size = 2 316 | instance_types = ["t3.medium"] 317 | ami_type = "BOTTLEROCKET_x86_64" 318 | } 319 | 320 | application = { 321 | name = "${module.eks.cluster_name}-application" 322 | capacity_type = "SPOT" 323 | min_size = 1 324 | max_size = 2 325 | desired_size = 1 326 | force_update_version = true 327 | instance_types = ["t3.medium"] 328 | ami_type = "BOTTLEROCKET_x86_64" 329 | } 330 | } 331 | # -- Set this to `true` only when you have correct iam_user details. 
332 | apply_config_map_aws_auth = true 333 | map_additional_iam_users = [ 334 | { 335 | userarn = "arn:aws:iam::123456789:user/hello@clouddrove.com" 336 | username = "hello@clouddrove.com" 337 | groups = ["system:masters"] 338 | } 339 | ] 340 | #fargate profile 341 | fargate_enabled = true 342 | fargate_profiles = { 343 | profile-0 = { 344 | addon_name = "0" 345 | namespace = "default" 346 | } 347 | } 348 | } 349 | ## Kubernetes provider configuration 350 | data "aws_eks_cluster" "this" { 351 | depends_on = [module.eks] 352 | name = module.eks.cluster_id 353 | } 354 | 355 | data "aws_eks_cluster_auth" "this" { 356 | depends_on = [module.eks] 357 | name = module.eks.cluster_id # must be the cluster name/id, not the certificate authority data 358 | } 359 | 360 | provider "kubernetes" { 361 | host = data.aws_eks_cluster.this.endpoint 362 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 363 | token = data.aws_eks_cluster_auth.this.token 364 | } 365 | -------------------------------------------------------------------------------- /docs/io.md: -------------------------------------------------------------------------------- 1 | ## Inputs 2 | 3 | | Name | Description | Type | Default | Required | 4 | |------|-------------|------|---------|:--------:| 5 | | access\_entries | Map of access entries to add to the cluster | `map(any)` | `{}` | no | 6 | | addons | Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources. | `any` | `[]` | no | 7 | | allowed\_cidr\_blocks | List of CIDR blocks to be allowed to connect to the EKS cluster. | `list(string)` | `[]` | no | 8 | | allowed\_security\_groups | List of Security Group IDs to be allowed to connect to the EKS cluster. 
| `list(string)` | `[]` | no | 9 | | apply\_config\_map\_aws\_auth | Whether to generate local files from `kubeconfig` and `config_map_aws_auth` and perform `kubectl apply` to apply the ConfigMap to allow the worker nodes to join the EKS cluster. | `bool` | `true` | no | 10 | | attributes | Additional attributes (e.g. `1`). | `list(any)` | `[]` | no | 11 | | authentication\_mode | The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` | `string` | `"CONFIG_MAP"` | no | 12 | | bootstrap\_self\_managed\_addons | Indicates whether or not to bootstrap self-managed addons after the cluster has been created | `bool` | `null` | no | 13 | | cluster\_compute\_config | Configuration block for the cluster compute configuration | `any` | `{}` | no | 14 | | cluster\_encryption\_config\_enabled | Set to `true` to enable Cluster Encryption Configuration | `bool` | `true` | no | 15 | | cluster\_encryption\_config\_kms\_key\_deletion\_window\_in\_days | Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction | `number` | `10` | no | 16 | | cluster\_encryption\_config\_kms\_key\_enable\_key\_rotation | Cluster Encryption Config KMS Key Resource argument - enable kms key rotation | `bool` | `true` | no | 17 | | cluster\_encryption\_config\_kms\_key\_policy | Cluster Encryption Config KMS Key Resource argument - key policy | `string` | `null` | no | 18 | | cluster\_encryption\_config\_resources | Cluster Encryption Config Resources to encrypt, e.g. ['secrets'] | `list(any)` |
[
"secrets"
]
| no | 19 | | cluster\_ip\_family | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `null` | no | 20 | | cluster\_log\_retention\_period | Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. | `number` | `30` | no | 21 | | cluster\_name | Name of eks cluster | `string` | `""` | no | 22 | | cluster\_remote\_network\_config | Configuration block for the cluster remote network configuration | `any` | `{}` | no | 23 | | cluster\_service\_ipv4\_cidr | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | 24 | | cluster\_service\_ipv6\_cidr | The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. 
Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster | `string` | `null` | no | 25 | | cluster\_timeouts | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | 26 | | cluster\_upgrade\_policy | Configuration block for the cluster upgrade policy | `any` | `{}` | no | 27 | | cluster\_zonal\_shift\_config | Configuration block for the cluster zonal shift | `any` | `{}` | no | 28 | | create | Controls if resources should be created (affects nearly all resources) | `bool` | `false` | no | 29 | | create\_node\_iam\_role | Determines whether an EKS Auto node IAM role is created | `bool` | `true` | no | 30 | | create\_schedule | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no | 31 | | eks\_additional\_security\_group\_ids | EKS additional security group id | `list(string)` | `[]` | no | 32 | | eks\_tags | Additional tags for EKS Cluster only. | `map(any)` | `{}` | no | 33 | | enable\_cluster\_creator\_admin\_permissions | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry | `bool` | `true` | no | 34 | | enabled | Whether to create the resources. Set to `false` to prevent the module from creating any resources. | `bool` | `true` | no | 35 | | enabled\_cluster\_log\_types | A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]. | `list(string)` |
[
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler"
]
| no | 36 | | endpoint\_private\_access | Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false. | `bool` | `true` | no | 37 | | endpoint\_public\_access | Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true. | `bool` | `true` | no | 38 | | environment | Environment (e.g. `prod`, `dev`, `staging`). | `string` | `""` | no | 39 | | external\_cluster | Set to true to create an AWS-managed node group for an existing EKS cluster. Assumes the EKS cluster is already provisioned. | `bool` | `false` | no | 40 | | fargate\_enabled | Whether fargate profile is enabled or not | `bool` | `false` | no | 41 | | fargate\_profiles | The number of Fargate Profiles that would be created. | `map(any)` | `{}` | no | 42 | | iam\_role\_additional\_policies | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | 43 | | kubernetes\_version | Desired Kubernetes master version. If you do not specify a value, the latest available version is used. | `string` | `""` | no | 44 | | label\_order | Label order, e.g. `name`,`application`. | `list(any)` |
[
"name",
"environment"
]
| no | 45 | | local\_exec\_interpreter | shell to use for local\_exec | `list(string)` |
[
"/bin/sh",
"-c"
]
| no | 46 | | managed\_node\_group | Map of eks-managed node group definitions to create | `any` | `{}` | no | 47 | | managed\_node\_group\_defaults | Map of eks-managed node group definitions to create | `any` | `{}` | no | 48 | | managedby | ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'. | `string` | `"hello@clouddrove.com"` | no | 49 | | map\_additional\_aws\_accounts | Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap | `list(string)` | `[]` | no | 50 | | map\_additional\_iam\_roles | Additional IAM roles to add to `config-map-aws-auth` ConfigMap |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | 51 | | map\_additional\_iam\_users | Additional IAM users to add to `config-map-aws-auth` ConfigMap |
list(object({
userarn = string
username = string
groups = list(string)
}))
| `[]` | no | 52 | | name | Name (e.g. `app` or `cluster`). | `string` | `""` | no | 53 | | node\_iam\_role\_additional\_policies | Additional policies to be added to the EKS Auto node IAM role | `map(string)` | `{}` | no | 54 | | node\_iam\_role\_description | Description of the EKS Auto node IAM role | `string` | `null` | no | 55 | | node\_iam\_role\_name | Name to use on the EKS Auto node IAM role created | `string` | `null` | no | 56 | | node\_iam\_role\_path | The EKS Auto node IAM role path | `string` | `null` | no | 57 | | node\_iam\_role\_permissions\_boundary | ARN of the policy that is used to set the permissions boundary for the EKS Auto node IAM role | `string` | `null` | no | 58 | | node\_iam\_role\_tags | A map of additional tags to add to the EKS Auto node IAM role created | `map(string)` | `{}` | no | 59 | | node\_iam\_role\_use\_name\_prefix | Determines whether the EKS Auto node IAM role name (`node_iam_role_name`) is used as a prefix | `bool` | `true` | no | 60 | | node\_role\_arn | IAM Role ARN to be used by NodeGroup. Refer to https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html for more details. | `string` | `""` | no | 61 | | nodes\_additional\_security\_group\_ids | EKS additional node group ids | `list(string)` | `[]` | no | 62 | | oidc\_provider\_enabled | Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html | `bool` | `true` | no | 63 | | openid\_connect\_audiences | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | 64 | | outpost\_config | Configuration for the AWS Outpost to provision the cluster on | `any` | `{}` | no | 65 | | permissions\_boundary | If provided, all IAM roles will be created with this permissions boundary attached. 
| `string` | `null` | no | 66 | | public\_access\_cidrs | Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0. | `list(string)` |
[
"0.0.0.0/0"
]
| no | 67 | | region | AWS region to create the EKS cluster in | `string` | `""` | no | 68 | | repository | Terraform current module repo | `string` | `"https://github.com/clouddrove/terraform-aws-eks"` | no | 69 | | schedules | Map of autoscaling group schedule to create | `map(any)` | `{}` | no | 70 | | self\_node\_group\_defaults | Map of self-managed node group default configurations | `any` | `{}` | no | 71 | | self\_node\_groups | Map of self-managed node group definitions to create | `any` | `{}` | no | 72 | | subnet\_filter\_name | The name of the subnet filter (e.g., tag:kubernetes.io/cluster/CLUSTER\_NAME) | `string` | `""` | no | 73 | | subnet\_filter\_values | List of values for the subnet filter (e.g., owned, shared) | `list(string)` | `[]` | no | 74 | | subnet\_ids | A list of subnet IDs to launch the cluster in. | `list(string)` | `[]` | no | 75 | | tags | Additional tags (e.g. map(`BusinessUnit`,`XYZ`). | `map(any)` | `{}` | no | 76 | | vpc\_id | VPC ID for the EKS cluster. | `string` | `""` | no | 77 | | vpc\_security\_group\_ids | A list of security group IDs to associate | `list(string)` | `[]` | no | 78 | | wait\_for\_cluster\_command | `local-exec` command to execute to determine if the EKS cluster is healthy. 
The cluster endpoint is available as the environment variable `ENDPOINT` | `string` | `"curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz"` | no | 79 | 80 | ## Outputs 81 | 82 | | Name | Description | 83 | |------|-------------| 84 | | cluster\_arn | The Amazon Resource Name (ARN) of the cluster | 85 | | cluster\_certificate\_authority\_data | Base64 encoded certificate data required to communicate with the cluster | 86 | | cluster\_endpoint | Endpoint for your Kubernetes API server | 87 | | cluster\_iam\_role\_arn | IAM role ARN of the EKS cluster | 88 | | cluster\_iam\_role\_name | IAM role name of the EKS cluster | 89 | | cluster\_iam\_role\_unique\_id | Stable and unique string identifying the IAM role | 90 | | cluster\_id | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | 91 | | cluster\_name | n/a | 92 | | cluster\_oidc\_issuer\_url | The URL on the EKS cluster for the OpenID Connect identity provider | 93 | | cluster\_platform\_version | Platform version for the cluster | 94 | | cluster\_primary\_security\_group\_id | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use default security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | 95 | | cluster\_status | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | 96 | | node\_group\_iam\_role\_arn | IAM role ARN of the EKS cluster | 97 | | node\_group\_iam\_role\_name | IAM role name of the EKS cluster | 98 | | node\_group\_iam\_role\_unique\_id | Stable and unique string identifying the IAM role | 99 | | node\_security\_group\_arn | Amazon Resource Name (ARN) of the node shared security group | 100 | | node\_security\_group\_id | ID of the node shared security group | 101 | | oidc\_provider\_arn | The ARN of the OIDC Provider if `enable_irsa = true` | 102 | | tags | n/a | 103 | 104 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
6 | 7 | ## [1.4.5] - 2025-08-29 8 | ### :sparkles: New Features 9 | - [`a72df1b`](https://github.com/clouddrove/terraform-aws-eks/commit/a72df1bc0e5d2133f1d44c54a0ecb826e78d8ad2) - add example for attaching node group to existing EKS cluster *(PR [#77](https://github.com/clouddrove/terraform-aws-eks/pull/77) by [@Arzianghanchi](https://github.com/Arzianghanchi))* 10 | 11 | ### :bug: Bug Fixes 12 | - [`b00656b`](https://github.com/clouddrove/terraform-aws-eks/commit/b00656be40908773e3d800b961abc64179feb7a8) - Deprecated arguments *(PR [#78](https://github.com/clouddrove/terraform-aws-eks/pull/78) by [@ruchit-sharma09](https://github.com/ruchit-sharma09))* 13 | - [`5eaa9d1`](https://github.com/clouddrove/terraform-aws-eks/commit/5eaa9d19cf5b2214e42318ee40ea904923119e53) - unauthorized error in aws_auth configmap for EKS *(PR [#79](https://github.com/clouddrove/terraform-aws-eks/pull/79) by [@Arzianghanchi](https://github.com/Arzianghanchi))* 14 | 15 | ### :memo: Documentation Changes 16 | - [`1955c86`](https://github.com/clouddrove/terraform-aws-eks/commit/1955c865a6d7d25f2bba52f593b9a42fccecbd42) - update CHANGELOG.md for 1.4.4 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 17 | 18 | 19 | ## [1.4.4] - 2025-06-10 20 | ### :sparkles: New Features 21 | - [`675b503`](https://github.com/clouddrove/terraform-aws-eks/commit/675b503a973bb99264a23161366e1233a14d0951) - Added EKS Automode Feature *(PR [#76](https://github.com/clouddrove/terraform-aws-eks/pull/76) by [@ruchit-sharma09](https://github.com/ruchit-sharma09))* 22 | 23 | ### :memo: Documentation Changes 24 | - [`0245d6b`](https://github.com/clouddrove/terraform-aws-eks/commit/0245d6b7aaaa7c499a5aa5440d2d785f594c5f9c) - update CHANGELOG.md for 1.4.3 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 25 | 26 | 27 | ## [1.4.3] - 2025-04-04 28 | ### :sparkles: New Features 29 | - [`2e163bb`](https://github.com/clouddrove/terraform-aws-eks/commit/2e163bb2caf96ee03ddf8d9ec38c580844f0bf65) 
- custom NodeGroup names without environment prefix *(PR [#69](https://github.com/clouddrove/terraform-aws-eks/pull/69) by [@Arzianghanchi](https://github.com/Arzianghanchi))* 30 | - [`5dbcb0e`](https://github.com/clouddrove/terraform-aws-eks/commit/5dbcb0e2182ee9cd151d2208c1e9c2c535527ea7) - updated branch name in uses of workflow *(PR [#75](https://github.com/clouddrove/terraform-aws-eks/pull/75) by [@clouddrove-ci](https://github.com/clouddrove-ci))* 31 | 32 | ### :memo: Documentation Changes 33 | - [`a4d89bd`](https://github.com/clouddrove/terraform-aws-eks/commit/a4d89bd9d2fbb3fee77e8986eb4c40b701410790) - update CHANGELOG.md for 1.4.2 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 34 | 35 | 36 | ## [1.4.2] - 2024-09-04 37 | ### :sparkles: New Features 38 | - [`fa4ad11`](https://github.com/clouddrove/terraform-aws-eks/commit/fa4ad11ba153ee8c652943908999a1f4ee4ea30a) - updated branch name in uses of workflow *(PR [#65](https://github.com/clouddrove/terraform-aws-eks/pull/65) by [@rakeshclouddevops](https://github.com/rakeshclouddevops))* 39 | 40 | ### :bug: Bug Fixes 41 | - [`a48263e`](https://github.com/clouddrove/terraform-aws-eks/commit/a48263e285534befc17e6556bcf042688dccab00) - fix data block, data block was calling before eks cluster creation *(PR [#66](https://github.com/clouddrove/terraform-aws-eks/pull/66) by [@nileshgadgi](https://github.com/nileshgadgi))* 42 | 43 | ### :memo: Documentation Changes 44 | - [`afab46b`](https://github.com/clouddrove/terraform-aws-eks/commit/afab46b2a83c4dd72d9a940881cc2cb5aa4a82bb) - update CHANGELOG.md for 1.4.1 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 45 | 46 | 47 | ## [1.4.1] - 2024-05-07 48 | ### :sparkles: New Features 49 | - [`965397c`](https://github.com/clouddrove/terraform-aws-eks/commit/965397c8d9fbe80d079dc4134b028b16c60da607) - update github-action version and added automerge file *(PR [#61](https://github.com/clouddrove/terraform-aws-eks/pull/61) by 
[@theprashantyadav](https://github.com/theprashantyadav))* 50 | - [`cfd2b41`](https://github.com/clouddrove/terraform-aws-eks/commit/cfd2b411629688901588c768c59c93be8447b773) - updated example path and readme paramters *(commit by [@Tanveer143s](https://github.com/Tanveer143s))* 51 | 52 | ### :bug: Bug Fixes 53 | - [`5268f7c`](https://github.com/clouddrove/terraform-aws-eks/commit/5268f7ca95d02aa1639fa8a4a6f1af836ab95973) - Update kubernetes provider name and tag. *(PR [#64](https://github.com/clouddrove/terraform-aws-eks/pull/64) by [@nileshgadgi](https://github.com/nileshgadgi))* 54 | 55 | ### :memo: Documentation Changes 56 | - [`9824ae1`](https://github.com/clouddrove/terraform-aws-eks/commit/9824ae1dff440241a1d975b866795d27b000e444) - update CHANGELOG.md for 1.4.0 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 57 | 58 | 59 | ## [1.4.0] - 2023-09-18 60 | ### :sparkles: New Features 61 | - [`416b3a6`](https://github.com/clouddrove/terraform-aws-eks/commit/416b3a69851bd662faa42ddda561331df3f12c11) - added default eks addons *(commit by [@h1manshu98](https://github.com/h1manshu98))* 62 | - [`4ee24c4`](https://github.com/clouddrove/terraform-aws-eks/commit/4ee24c44638bf4f33a970c2a0605e383aac19f96) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 63 | - [`a63cc9a`](https://github.com/clouddrove/terraform-aws-eks/commit/a63cc9a42ff60c4e969586aea916446c4d73d3e7) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 64 | - [`0854828`](https://github.com/clouddrove/terraform-aws-eks/commit/08548281013efceb2bc58ecfa2b8b7f735bd76dc) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 65 | - [`9cc2ba8`](https://github.com/clouddrove/terraform-aws-eks/commit/9cc2ba84d7c38127049c92f360e48ff2aa9e19dc) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 66 | - 
[`114b2b4`](https://github.com/clouddrove/terraform-aws-eks/commit/114b2b4d90ac37ac20587f7e0c6182332d10af76) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 67 | - [`b707e3e`](https://github.com/clouddrove/terraform-aws-eks/commit/b707e3e9a376171feff3a8fe5dca69eef0d59b0a) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 68 | - [`cbeab87`](https://github.com/clouddrove/terraform-aws-eks/commit/cbeab870f2456b60e952f75fdac208b95fb1fcf8) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 69 | - [`abe8d90`](https://github.com/clouddrove/terraform-aws-eks/commit/abe8d90fd1138ac841fed3bf35b878f0e1012435) - fargate profile added *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 70 | - [`1e4c37a`](https://github.com/clouddrove/terraform-aws-eks/commit/1e4c37abddbecd6f87337c1700f77df852ea5c2f) - fargate profile added *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 71 | - [`25c9650`](https://github.com/clouddrove/terraform-aws-eks/commit/25c9650645ce130ba13f95cf9ba89850fc7f98ce) - default variable removed *(commit by [@d4kverma](https://github.com/d4kverma))* 72 | - [`8afa1d5`](https://github.com/clouddrove/terraform-aws-eks/commit/8afa1d543e7adf82601565d06445cd6d3e95eea6) - version fixed *(commit by [@d4kverma](https://github.com/d4kverma))* 73 | - [`71b27cd`](https://github.com/clouddrove/terraform-aws-eks/commit/71b27cd7af357fb07b81f665a46a29daa1d465cf) - version fixed *(commit by [@d4kverma](https://github.com/d4kverma))* 74 | - [`9b4604d`](https://github.com/clouddrove/terraform-aws-eks/commit/9b4604d303fdc9a8d365dcb262bd57a35bac8349) - additional tags for public and private subnets *(PR [#58](https://github.com/clouddrove/terraform-aws-eks/pull/58) by [@h1manshu98](https://github.com/h1manshu98))* 75 | 76 | ### :bug: Bug Fixes 77 | - 
[`24b6c49`](https://github.com/clouddrove/terraform-aws-eks/commit/24b6c493f79176998d4073325feaed7313e15f6e) - Enabled key rotation in fargate example *(commit by [@13archit](https://github.com/13archit))* 78 | - [`10c3a9b`](https://github.com/clouddrove/terraform-aws-eks/commit/10c3a9b32e46a427568399ac9d6a38528d054eee) - Fixed tfsec ignore *(commit by [@13archit](https://github.com/13archit))* 79 | - [`3ea65e5`](https://github.com/clouddrove/terraform-aws-eks/commit/3ea65e562627f93eb4b13f458c59e3b7c9331e76) - Added tfsec ignore *(commit by [@13archit](https://github.com/13archit))* 80 | - [`1bbff08`](https://github.com/clouddrove/terraform-aws-eks/commit/1bbff08dc43595c328337e27b3c207948dea3a6f) - fix tflint workflow. *(commit by [@13archit](https://github.com/13archit))* 81 | - [`72abff5`](https://github.com/clouddrove/terraform-aws-eks/commit/72abff5743e388fd635f3b25e4b1da97bd7c0e9a) - removed keypair module *(commit by [@h1manshu98](https://github.com/h1manshu98))* 82 | - [`eef6961`](https://github.com/clouddrove/terraform-aws-eks/commit/eef69618d577be864c5d0a1624448df54fc0f7bd) - removed keypair module *(commit by [@h1manshu98](https://github.com/h1manshu98))* 83 | - [`3c6b476`](https://github.com/clouddrove/terraform-aws-eks/commit/3c6b4760d91280824075588215a1270cf6cd67ea) - removed keypair module *(commit by [@h1manshu98](https://github.com/h1manshu98))* 84 | 85 | 86 | ## [0.15.2] - 2022-07-05 87 | 88 | ## [1.0.1] - 2022-07-29 89 | 90 | ## [0.12.9.2] - 2022-04-26 91 | 92 | ## [1.0.0] - 2022-03-30 93 | 94 | ## [0.15.1] - 2021-12-10 95 | 96 | ## [0.15.0.1] - 2021-11-11 97 | 98 | ## [0.12.10.1] - 2021-09-03 99 | 100 | ## [0.12.13.1] - 2021-07-22 101 | 102 | ## [0.12.9.1] - 2021-07-22 103 | 104 | ## [0.15.0] - 2021-06-30 105 | 106 | ## [0.12.6.1] - 2021-03-25 107 | 108 | ## [0.13.0] - 2020-11-03 109 | 110 | ## [0.12.13] - 2020-11-02 111 | 112 | ## [0.12.12] - 2020-11-02 113 | 114 | ## [0.12.11] - 2020-09-29 115 | 116 | ## [0.12.10] - 2020-09-08 117 | 118 | ## 
[0.12.9] - 2020-07-15 119 | 120 | ## [0.12.8] - 2020-07-14 121 | 122 | ## [0.12.7] - 2020-07-02 123 | 124 | ## [0.12.6] - 2020-05-24 125 | 126 | ## [0.12.5] - 2020-03-05 127 | 128 | ## [0.12.4] - 2019-12-30 129 | 130 | ## [0.12.3] - 2019-12-05 131 | 132 | ## [0.12.2] - 2019-12-02 133 | 134 | ## [0.12.0] - 2019-11-08 135 | 136 | 137 | [0.12.0]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.0...master 138 | [0.12.2]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.2...master 139 | [0.12.3]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.3...master 140 | [0.12.4]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.4...master 141 | [0.12.5]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.5...master 142 | [0.12.6]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.6 143 | [0.12.7]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.7...master 144 | [0.12.8]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.8 145 | [0.12.9]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.9...master 146 | [0.12.10]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.10...master 147 | [0.12.11]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.11...master 148 | [0.12.12]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.12 149 | [0.12.13]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.13 150 | [0.13.0]: https://github.com/clouddrove/terraform-aws-eks/compare/0.13.0...master 151 | [0.12.6.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.6.1 152 | [0.15.0]: https://github.com/clouddrove/terraform-aws-eks/compare/0.15.0...master 153 | [0.12.9.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.9.1 154 | [0.12.13.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.13.1 155 | [0.12.10.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.10.1 
156 | [0.15.0.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.15.0.1 157 | [0.15.1]: https://github.com/clouddrove/terraform-aws-eks/compare/0.15.1...master 158 | [1.0.0]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/1.0.0 159 | [0.12.9.2]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.9.2 160 | [1.0.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/1.0.1 161 | [0.15.2]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.15.2 162 | [1.4.0]: https://github.com/clouddrove/terraform-aws-eks/compare/1.3.0...1.4.0 163 | [1.4.1]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.0...1.4.1 164 | [1.4.2]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.1...1.4.2 165 | [1.4.3]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.2...1.4.3 166 | [1.4.4]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.3...1.4.4 167 | [1.4.5]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.4...1.4.5 168 | -------------------------------------------------------------------------------- /examples/complete/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | locals { 5 | name = "clouddrove-eks" 6 | region = "eu-west-1" 7 | vpc_cidr_block = module.vpc.vpc_cidr_block 8 | additional_cidr_block = "172.16.0.0/16" 9 | environment = "test" 10 | label_order = ["name", "environment"] 11 | tags = { 12 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 13 | } 14 | } 15 | 16 | ################################################################################ 17 | # VPC module call 18 | ################################################################################ 19 | module "vpc" { 20 | source = "clouddrove/vpc/aws" 21 | version = "2.0.0" 22 | 23 | name = "${local.name}-vpc" 24 | environment = local.environment 25 | cidr_block = "10.10.0.0/16" 26 | } 27 | 28 | 
################################################################################ 29 | # Subnets module call 30 | ################################################################################ 31 | module "subnets" { 32 | source = "clouddrove/subnet/aws" 33 | version = "2.0.0" 34 | 35 | name = "${local.name}-subnets" 36 | environment = local.environment 37 | 38 | nat_gateway_enabled = true 39 | availability_zones = ["${local.region}a", "${local.region}b"] 40 | vpc_id = module.vpc.vpc_id 41 | cidr_block = module.vpc.vpc_cidr_block 42 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 43 | type = "public-private" 44 | igw_id = module.vpc.igw_id 45 | 46 | extra_public_tags = { 47 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 48 | "kubernetes.io/role/elb" = "1" 49 | } 50 | 51 | extra_private_tags = { 52 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 53 | "kubernetes.io/role/internal-elb" = "1" 54 | } 55 | 56 | public_inbound_acl_rules = [ 57 | { 58 | rule_number = 100 59 | rule_action = "allow" 60 | from_port = 0 61 | to_port = 0 62 | protocol = "-1" 63 | cidr_block = "0.0.0.0/0" 64 | }, 65 | { 66 | rule_number = 101 67 | rule_action = "allow" 68 | from_port = 0 69 | to_port = 0 70 | protocol = "-1" 71 | ipv6_cidr_block = "::/0" 72 | }, 73 | ] 74 | 75 | public_outbound_acl_rules = [ 76 | { 77 | rule_number = 100 78 | rule_action = "allow" 79 | from_port = 0 80 | to_port = 0 81 | protocol = "-1" 82 | cidr_block = "0.0.0.0/0" 83 | }, 84 | { 85 | rule_number = 101 86 | rule_action = "allow" 87 | from_port = 0 88 | to_port = 0 89 | protocol = "-1" 90 | ipv6_cidr_block = "::/0" 91 | }, 92 | ] 93 | 94 | private_inbound_acl_rules = [ 95 | { 96 | rule_number = 100 97 | rule_action = "allow" 98 | from_port = 0 99 | to_port = 0 100 | protocol = "-1" 101 | cidr_block = "0.0.0.0/0" 102 | }, 103 | { 104 | rule_number = 101 105 | rule_action = "allow" 106 | from_port = 0 107 | to_port = 0 108 | protocol = "-1" 109 | ipv6_cidr_block = "::/0" 110 | }, 
111 | ] 112 | 113 | private_outbound_acl_rules = [ 114 | { 115 | rule_number = 100 116 | rule_action = "allow" 117 | from_port = 0 118 | to_port = 0 119 | protocol = "-1" 120 | cidr_block = "0.0.0.0/0" 121 | }, 122 | { 123 | rule_number = 101 124 | rule_action = "allow" 125 | from_port = 0 126 | to_port = 0 127 | protocol = "-1" 128 | ipv6_cidr_block = "::/0" 129 | }, 130 | ] 131 | } 132 | 133 | 134 | # ################################################################################ 135 | # Security Groups module call 136 | ################################################################################ 137 | module "ssh" { 138 | source = "clouddrove/security-group/aws" 139 | version = "2.0.0" 140 | 141 | name = "${local.name}-ssh" 142 | environment = local.environment 143 | 144 | vpc_id = module.vpc.vpc_id 145 | new_sg_ingress_rules_with_cidr_blocks = [{ 146 | rule_count = 1 147 | from_port = 22 148 | protocol = "tcp" 149 | to_port = 22 150 | cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 151 | description = "Allow ssh traffic." 152 | }, 153 | { 154 | rule_count = 2 155 | from_port = 27017 156 | protocol = "tcp" 157 | to_port = 27017 158 | cidr_blocks = [local.additional_cidr_block] 159 | description = "Allow Mongodb traffic." 160 | } 161 | ] 162 | 163 | ## EGRESS Rules 164 | new_sg_egress_rules_with_cidr_blocks = [{ 165 | rule_count = 1 166 | from_port = 22 167 | protocol = "tcp" 168 | to_port = 22 169 | cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 170 | description = "Allow ssh outbound traffic." 171 | }, 172 | { 173 | rule_count = 2 174 | from_port = 27017 175 | protocol = "tcp" 176 | to_port = 27017 177 | cidr_blocks = [local.additional_cidr_block] 178 | description = "Allow Mongodb outbound traffic." 
179 | }] 180 | } 181 | 182 | module "http_https" { 183 | source = "clouddrove/security-group/aws" 184 | version = "2.0.0" 185 | 186 | name = "${local.name}-http-https" 187 | environment = local.environment 188 | 189 | vpc_id = module.vpc.vpc_id 190 | ## INGRESS Rules 191 | new_sg_ingress_rules_with_cidr_blocks = [{ 192 | rule_count = 1 193 | from_port = 22 194 | protocol = "tcp" 195 | to_port = 22 196 | cidr_blocks = [module.vpc.vpc_cidr_block] 197 | description = "Allow ssh traffic." 198 | }, 199 | { 200 | rule_count = 2 201 | from_port = 80 202 | protocol = "tcp" 203 | to_port = 80 204 | cidr_blocks = [module.vpc.vpc_cidr_block] 205 | description = "Allow http traffic." 206 | }, 207 | { 208 | rule_count = 3 209 | from_port = 443 210 | protocol = "tcp" 211 | to_port = 443 212 | cidr_blocks = [module.vpc.vpc_cidr_block] 213 | description = "Allow https traffic." 214 | } 215 | ] 216 | 217 | ## EGRESS Rules 218 | new_sg_egress_rules_with_cidr_blocks = [{ 219 | rule_count = 1 220 | from_port = 0 221 | protocol = "-1" 222 | to_port = 0 223 | cidr_blocks = ["0.0.0.0/0"] 224 | ipv6_cidr_blocks = ["::/0"] 225 | description = "Allow all traffic." 
226 | } 227 | ] 228 | } 229 | 230 | ################################################################################ 231 | # KMS Module call 232 | ################################################################################ 233 | module "kms" { 234 | source = "clouddrove/kms/aws" 235 | version = "1.3.0" 236 | 237 | name = "${local.name}-kms" 238 | environment = local.environment 239 | label_order = local.label_order 240 | enabled = true 241 | description = "KMS key for EBS of EKS nodes" 242 | enable_key_rotation = false 243 | policy = data.aws_iam_policy_document.kms.json 244 | } 245 | 246 | data "aws_iam_policy_document" "kms" { 247 | version = "2012-10-17" 248 | statement { 249 | sid = "Enable IAM User Permissions" 250 | effect = "Allow" 251 | principals { 252 | type = "AWS" 253 | identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] 254 | } 255 | actions = ["kms:*"] 256 | resources = ["*"] 257 | } 258 | } 259 | 260 | data "aws_caller_identity" "current" {} 261 | 262 | ################################################################################ 263 | # EKS Module call 264 | ################################################################################ 265 | module "eks" { 266 | source = "../.." 
267 | 268 | name = local.name 269 | environment = local.environment 270 | enabled = true 271 | 272 | kubernetes_version = "1.32" 273 | endpoint_private_access = true 274 | endpoint_public_access = true 275 | 276 | # Networking 277 | vpc_id = module.vpc.vpc_id 278 | subnet_ids = module.subnets.private_subnet_id 279 | allowed_security_groups = [module.ssh.security_group_id] 280 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 281 | allowed_cidr_blocks = [local.vpc_cidr_block] 282 | 283 | # Self Managed Node Group 284 | # Node Groups Defaults Values It will Work all Node Groups 285 | self_node_group_defaults = { 286 | subnet_ids = module.subnets.private_subnet_id 287 | propagate_tags = [{ 288 | key = "aws-node-termination-handler/managed" 289 | value = true 290 | propagate_at_launch = true 291 | }, 292 | { 293 | key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" 294 | value = "owned" 295 | propagate_at_launch = true 296 | } 297 | ] 298 | block_device_mappings = { 299 | xvda = { 300 | device_name = "/dev/xvda" 301 | ebs = { 302 | volume_size = 50 303 | volume_type = "gp3" 304 | iops = 3000 305 | throughput = 150 306 | encrypted = true 307 | kms_key_id = module.kms.key_arn 308 | } 309 | } 310 | } 311 | } 312 | self_node_groups = { 313 | self_managed_critical = { 314 | name = "self_managed_critical" 315 | min_size = 1 316 | max_size = 2 317 | desired_size = 1 318 | bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" 319 | instance_type = "t3.medium" 320 | } 321 | self_managed_application = { 322 | name = "self_managed_application" 323 | instance_market_options = { 324 | market_type = "spot" 325 | } 326 | min_size = 1 327 | max_size = 2 328 | desired_size = 1 329 | bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" 330 | instance_type = "t3.medium" 331 | } 332 | # Schdule EKS Managed Auto Scaling node group. 
Change start_time and end_time. 333 | schedules = { 334 | scale-up = { 335 | min_size = 2 336 | max_size = 2 # Retains current max size 337 | desired_size = 2 338 | start_time = "2023-09-15T19:00:00Z" 339 | end_time = "2023-09-19T19:00:00Z" 340 | timezone = "Europe/Amsterdam" 341 | recurrence = "0 7 * * 1" 342 | }, 343 | scale-down = { 344 | min_size = 0 345 | max_size = 0 # Retains current max size 346 | desired_size = 0 347 | start_time = "2023-09-12T12:00:00Z" 348 | end_time = "2024-03-05T12:00:00Z" 349 | timezone = "Europe/Amsterdam" 350 | recurrence = "0 7 * * 5" 351 | } 352 | } 353 | 354 | } 355 | # AWS Managed Node Group 356 | # Node Groups Defaults Values It will Work all Node Groups 357 | managed_node_group_defaults = { 358 | subnet_ids = module.subnets.private_subnet_id 359 | nodes_additional_security_group_ids = [module.ssh.security_group_id] 360 | tags = { 361 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 362 | "k8s.io/cluster/${module.eks.cluster_name}" = "shared" 363 | } 364 | block_device_mappings = { 365 | xvda = { 366 | device_name = "/dev/xvda" 367 | ebs = { 368 | volume_size = 50 369 | volume_type = "gp3" 370 | iops = 3000 371 | throughput = 150 372 | encrypted = true 373 | kms_key_id = module.kms.key_arn 374 | } 375 | } 376 | } 377 | } 378 | managed_node_group = { 379 | critical = { 380 | name = "${module.eks.cluster_name}-critical" 381 | min_size = 1 382 | max_size = 2 383 | desired_size = 1 384 | instance_types = ["t3.medium"] 385 | ami_type = "BOTTLEROCKET_x86_64" 386 | } 387 | application = { 388 | name = "${module.eks.cluster_name}-application" 389 | capacity_type = "SPOT" 390 | 391 | min_size = 1 392 | max_size = 2 393 | desired_size = 1 394 | force_update_version = true 395 | instance_types = ["t3.medium"] 396 | ami_type = "BOTTLEROCKET_x86_64" 397 | } 398 | } 399 | apply_config_map_aws_auth = true 400 | map_additional_iam_users = [ 401 | { 402 | userarn = "arn:aws:iam::123456789:user/hello@clouddrove.com" 403 | username 
= "hello@clouddrove.com" 404 | groups = ["system:masters"] 405 | } 406 | ] 407 | # Schedule EKS Managed Auto Scaling node group 408 | schedules = { 409 | scale-up = { 410 | min_size = 2 411 | max_size = 2 # Retains current max size 412 | desired_size = 2 413 | start_time = "2023-09-15T19:00:00Z" 414 | end_time = "2023-09-19T19:00:00Z" 415 | timezone = "Europe/Amsterdam" 416 | recurrence = "0 7 * * 1" 417 | }, 418 | scale-down = { 419 | min_size = 0 420 | max_size = 0 # Retains current max size 421 | desired_size = 0 422 | start_time = "2023-09-12T12:00:00Z" 423 | end_time = "2024-03-05T12:00:00Z" 424 | timezone = "Europe/Amsterdam" 425 | recurrence = "0 7 * * 5" 426 | } 427 | } 428 | } 429 | 430 | # Kubernetes provider configuration 431 | data "aws_eks_cluster" "this" { 432 | name = module.eks.cluster_id 433 | } 434 | 435 | data "aws_eks_cluster_auth" "this" { 436 | # `name` must be the EKS cluster name/id, not the CA certificate data; 437 | # passing cluster_certificate_authority_data here makes the token lookup fail. 438 | name = module.eks.cluster_id 439 | } 440 | provider "kubernetes" { 441 | host = data.aws_eks_cluster.this.endpoint 442 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 443 | token = data.aws_eks_cluster_auth.this.token 444 | } -------------------------------------------------------------------------------- /node_group/self_managed/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | self_managed_node_group_default_tags = { 3 | "Name" = "${module.labels.id}" 4 | "Environment" = "${var.environment}" 5 | "kubernetes.io/cluster/${var.cluster_name}" = "owned" 6 | "k8s.io/cluster/${var.cluster_name}" = "owned" 7 | } 8 | userdata = var.enabled ?
templatefile("${path.module}/_userdata.tpl", {
    cluster_endpoint           = var.cluster_endpoint
    certificate_authority_data = var.cluster_auth_base64
    cluster_name               = var.cluster_name
    bootstrap_extra_args       = var.bootstrap_extra_args
  }) : null
}

data "aws_partition" "current" {}

data "aws_caller_identity" "current" {}


# AMI: latest Amazon Linux EKS-optimized image for the requested
# Kubernetes version, published by Amazon.
data "aws_ami" "eks_default" {
  count = var.enabled ? 1 : 0

  filter {
    name   = "name"
    values = ["amazon-eks-node-${var.kubernetes_version}-v*"]
  }

  most_recent = true
  owners      = ["amazon"]
}

#Module      : label
#Description : Terraform module to create consistent naming for multiple names.
module "labels" {
  source  = "clouddrove/labels/aws"
  version = "1.3.0"

  name        = var.name
  repository  = var.repository
  environment = var.environment
  managedby   = var.managedby
  extra_tags  = var.tags
  attributes  = compact(concat(var.attributes, ["nodes"]))
  label_order = var.label_order
}


# Launch template used by the self-managed node group's ASG.
resource "aws_launch_template" "this" {
  count = var.enabled ? 1 : 0
  name  = module.labels.id

  ebs_optimized = var.ebs_optimized
  image_id      = data.aws_ami.eks_default[0].image_id
  instance_type = var.instance_type
  key_name      = var.key_name
  # FIX: user_data must be base64-ENCODED; the previous base64decode() of a
  # plain-text bootstrap script fails at plan time.
  user_data                            = base64encode(local.userdata)
  disable_api_termination              = var.disable_api_termination
  instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior
  kernel_id                            = var.kernel_id
  ram_disk_id                          = var.ram_disk_id


  #volumes
  dynamic "block_device_mappings" {
    for_each = var.block_device_mappings
    content {
      device_name  = block_device_mappings.value.device_name
      no_device    = lookup(block_device_mappings.value, "no_device", null)
      virtual_name = lookup(block_device_mappings.value, "virtual_name", null)


      dynamic "ebs" {
        for_each = flatten([lookup(block_device_mappings.value, "ebs", [])])
        content {
          # Volumes are always encrypted and cleaned up with the instance.
          delete_on_termination = true
          encrypted             = true
          kms_key_id            = var.kms_key_id
          iops                  = lookup(ebs.value, "iops", null)
          throughput            = lookup(ebs.value, "throughput", null)
          snapshot_id           = lookup(ebs.value, "snapshot_id", null)
          volume_size           = lookup(ebs.value, "volume_size", null)
          volume_type           = lookup(ebs.value, "volume_type", null)
        }
      }
    }
  }

  # capacity_reservation
  dynamic "capacity_reservation_specification" {
    for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : []
    content {
      capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null)

      dynamic "capacity_reservation_target" {
        for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", [])
        content {
          capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null)
        }
      }
    }
  }

  #CPU option
  dynamic "cpu_options" {
    for_each = var.cpu_options != null ? [var.cpu_options] : []
    content {
      core_count       = cpu_options.value.core_count
      threads_per_core = cpu_options.value.threads_per_core
    }
  }

  #credit_specification
  dynamic "credit_specification" {
    for_each = var.credit_specification != null ? [var.credit_specification] : []
    content {
      cpu_credits = credit_specification.value.cpu_credits
    }
  }

  dynamic "enclave_options" {
    for_each = var.enclave_options != null ? [var.enclave_options] : []
    content {
      enabled = enclave_options.value.enabled
    }
  }

  dynamic "hibernation_options" {
    for_each = var.hibernation_options != null ? [var.hibernation_options] : []
    content {
      configured = hibernation_options.value.configured
    }
  }

  iam_instance_profile {
    arn = var.iam_instance_profile_arn
  }


  dynamic "instance_market_options" {
    for_each = var.instance_market_options != null ? [var.instance_market_options] : []
    content {
      market_type = instance_market_options.value.market_type

      dynamic "spot_options" {
        for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : []
        content {
          # FIX: the lookup key must be a quoted string; the bare identifier
          # block_duration_minutes was an undefined reference.
          block_duration_minutes         = lookup(spot_options.value, "block_duration_minutes", null)
          instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null)
          max_price                      = lookup(spot_options.value, "max_price", null)
          spot_instance_type             = lookup(spot_options.value, "spot_instance_type", null)
          valid_until                    = lookup(spot_options.value, "valid_until", null)
        }
      }
    }
  }

  dynamic "license_specification" {
    for_each = var.license_specifications != null ? [var.license_specifications] : []
    content {
      # FIX: the dynamic-block iterator is named after the block label
      # ("license_specification"); the plural form referenced an undefined object.
      license_configuration_arn = license_specification.value.license_configuration_arn
    }
  }

  dynamic "metadata_options" {
    for_each = var.metadata_options != null ? [var.metadata_options] : []
    content {
      http_endpoint               = lookup(metadata_options.value, "http_endpoint", null)
      http_tokens                 = lookup(metadata_options.value, "http_tokens", null)
      http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null)
      http_protocol_ipv6          = lookup(metadata_options.value, "http_protocol_ipv6", null)
      instance_metadata_tags      = lookup(metadata_options.value, "instance_metadata_tags", null)
    }
  }

  dynamic "monitoring" {
    for_each = var.enable_monitoring != null ? [1] : []
    content {
      enabled = var.enable_monitoring
    }
  }


  network_interfaces {
    description                 = module.labels.id
    device_index                = 0
    associate_public_ip_address = var.associate_public_ip_address
    delete_on_termination       = true
    security_groups             = var.security_group_ids
  }

  dynamic "placement" {
    for_each = var.placement != null ? [var.placement] : []
    content {
      affinity          = lookup(placement.value, "affinity", null)
      availability_zone = lookup(placement.value, "availability_zone", null)
      group_name        = lookup(placement.value, "group_name", null)
      host_id           = lookup(placement.value, "host_id", null)
      spread_domain     = lookup(placement.value, "spread_domain", null)
      tenancy           = lookup(placement.value, "tenancy", null)
      partition_number  = lookup(placement.value, "partition_number", null)
    }
  }


  # Propagate the label tags to instances, volumes and ENIs launched
  # from this template.
  dynamic "tag_specifications" {
    for_each = toset(["instance", "volume", "network-interface"])
    content {
      resource_type = tag_specifications.key
      tags = merge(
        module.labels.tags,
      { Name = module.labels.id })
    }
  }

  lifecycle {
    create_before_destroy = true
  }


  tags = module.labels.tags

}


# Auto Scaling group backing the self-managed node group.
resource "aws_autoscaling_group" "this" {
  count = var.enabled ? 1 : 0

  name = module.labels.id

  # With a mixed-instances policy the launch template is referenced inside
  # mixed_instances_policy instead of here.
  dynamic "launch_template" {
    for_each = var.use_mixed_instances_policy ? [] : [1]

    content {
      name    = aws_launch_template.this[0].name
      version = aws_launch_template.this[0].latest_version
    }
  }

  availability_zones  = var.availability_zones
  vpc_zone_identifier = var.subnet_ids

  min_size                  = var.min_size
  max_size                  = var.max_size
  desired_capacity          = var.desired_size
  capacity_rebalance        = var.capacity_rebalance
  min_elb_capacity          = var.min_elb_capacity
  wait_for_elb_capacity     = var.wait_for_elb_capacity
  wait_for_capacity_timeout = var.wait_for_capacity_timeout
  default_cooldown          = var.default_cooldown
  protect_from_scale_in     = var.protect_from_scale_in

  target_group_arns         = var.target_group_arns
  placement_group           = var.placement_group
  health_check_type         = var.health_check_type
  health_check_grace_period = var.health_check_grace_period

  force_delete          = var.force_delete
  termination_policies  = var.termination_policies
  suspended_processes   = var.suspended_processes
  max_instance_lifetime = var.max_instance_lifetime

  enabled_metrics         = var.enabled_metrics
  metrics_granularity     = var.metrics_granularity
  service_linked_role_arn = var.service_linked_role_arn

  dynamic "initial_lifecycle_hook" {
    for_each = var.initial_lifecycle_hooks
    content {
      name                    = initial_lifecycle_hook.value.name
      default_result          = lookup(initial_lifecycle_hook.value, "default_result", null)
      heartbeat_timeout       = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null)
      lifecycle_transition    = initial_lifecycle_hook.value.lifecycle_transition
      notification_metadata   = lookup(initial_lifecycle_hook.value, "notification_metadata", null)
      notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null)
      role_arn                = lookup(initial_lifecycle_hook.value, "role_arn", null)
    }
  }

  dynamic "instance_refresh" {
    for_each = var.instance_refresh != null ? [var.instance_refresh] : []
    content {
      strategy = instance_refresh.value.strategy
      triggers = lookup(instance_refresh.value, "triggers", null)

      dynamic "preferences" {
        for_each = lookup(instance_refresh.value, "preferences", null) != null ? [instance_refresh.value.preferences] : []
        content {
          instance_warmup        = lookup(preferences.value, "instance_warmup", null)
          min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null)
          checkpoint_delay       = lookup(preferences.value, "checkpoint_delay", null)
          checkpoint_percentages = lookup(preferences.value, "checkpoint_percentages", null)
        }
      }
    }
  }

  dynamic "mixed_instances_policy" {
    for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : []
    content {
      dynamic "instances_distribution" {
        for_each = try([mixed_instances_policy.value.instances_distribution], [])
        content {
          on_demand_allocation_strategy            = lookup(instances_distribution.value, "on_demand_allocation_strategy", null)
          on_demand_base_capacity                  = lookup(instances_distribution.value, "on_demand_base_capacity", null)
          on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null)
          spot_allocation_strategy                 = lookup(instances_distribution.value, "spot_allocation_strategy", null)
          spot_instance_pools                      = lookup(instances_distribution.value, "spot_instance_pools", null)
          spot_max_price                           = lookup(instances_distribution.value, "spot_max_price", null)
        }
      }

      launch_template {
        launch_template_specification {
          launch_template_name = aws_launch_template.this[0].name
          version              = aws_launch_template.this[0].latest_version
        }

        dynamic "override" {
          for_each = try(mixed_instances_policy.value.override, [])
          content {
            instance_type     = lookup(override.value, "instance_type", null)
            weighted_capacity = lookup(override.value, "weighted_capacity", null)

            dynamic "launch_template_specification" {
              for_each = lookup(override.value, "launch_template_specification", null) != null ? override.value.launch_template_specification : []
              content {
                launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null)
              }
            }
          }
        }
      }
    }
  }

  dynamic "warm_pool" {
    for_each = var.warm_pool != null ? [var.warm_pool] : []
    content {
      pool_state                  = lookup(warm_pool.value, "pool_state", null)
      min_size                    = lookup(warm_pool.value, "min_size", null)
      max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null)
    }
  }

  timeouts {
    delete = var.cluster_delete_timeout
  }


  lifecycle {
    create_before_destroy = true
    # desired_capacity is managed by the cluster autoscaler / schedules,
    # so drift on it must not trigger a plan diff.
    ignore_changes = [
      desired_capacity
    ]
  }

  dynamic "tag" {
    for_each = merge(local.self_managed_node_group_default_tags, var.tags)
    content {
      key                 = tag.key
      value               = tag.value
      propagate_at_launch = true
    }
  }
}

#---------------------------------------------------ASG-schedule-----------------------------------------------------------

resource "aws_autoscaling_schedule" "this" {
  for_each = var.enabled && var.create_schedule ? var.schedules : {}

  scheduled_action_name  = each.key
  autoscaling_group_name = aws_autoscaling_group.this[0].name

  min_size         = lookup(each.value, "min_size", null)
  max_size         = lookup(each.value, "max_size", null)
  desired_capacity = lookup(each.value, "desired_size", null)
  start_time       = lookup(each.value, "start_time", null)
  end_time         = lookup(each.value, "end_time", null)
  # FIX: the example schedule maps use the key "timezone"; the previous
  # lookup of "time_zone" always returned null. Accept "timezone" first
  # and fall back to "time_zone" for backward compatibility.
  time_zone = lookup(each.value, "timezone", lookup(each.value, "time_zone", null))

  # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]
  # Cron examples: https://crontab.guru/examples.html
  recurrence = lookup(each.value, "recurrence", null)
}



--------------------------------------------------------------------------------