├── .deepsource.toml ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md ├── dependbot.yml └── workflows │ ├── auto_assignee.yml │ ├── automerge.yml │ ├── changelog.yml │ ├── readme.yml │ ├── tf-checks.yml │ ├── tflint.yml │ └── tfsec.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.md ├── README.yaml ├── aws_auth.tf ├── aws_node_groups.tf ├── data.tf ├── docs └── io.md ├── examples ├── aws_managed │ ├── example.tf │ ├── output.tf │ └── versions.tf ├── aws_managed_with_fargate │ ├── example.tf │ ├── output.tf │ └── versions.tf ├── complete │ ├── example.tf │ ├── output.tf │ └── versions.tf └── self_managed │ ├── example.tf │ ├── output.tf │ └── versions.tf ├── fargate_profile.tf ├── iam.tf ├── kms.tf ├── kubeconfig.tpl ├── locals.tf ├── main.tf ├── node_group ├── aws_managed │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── fargate_profile │ ├── fargate.tf │ └── variables.tf └── self_managed │ ├── _userdata.tpl │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── outputs.tf ├── security_groups.tf ├── self_node_groups.tf ├── variables.tf └── versions.tf /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "terraform" -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in the repo. 2 | * @anmolnagpal @clouddrove/approvers @clouddrove-ci 3 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## what 2 | * Describe high-level what changed as a result of these commits (i.e. in plain-english, what do these changes mean?) 3 | * Use bullet points to be concise and to the point. 4 | 5 | ## why 6 | * Provide the justifications for the changes (e.g. business case). 7 | * Describe why these changes were made (e.g. why do these commits fix the problem?) 8 | * Use bullet points to be concise and to the point. 9 | 10 | ## references 11 | * Link to any supporting jira issues or helpful documentation to add some context (e.g. stackoverflow). 12 | * Use `closes #123`, if this PR closes a Jira issue `#123` 13 | -------------------------------------------------------------------------------- /.github/dependbot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | version: 2 6 | updates: 7 | - package-ecosystem: "terraform" # See documentation for possible values 8 | directory: "/" # Location of package manifests 9 | schedule: 10 | interval: "weekly" 11 | # Add assignees 12 | assignees: 13 | - "clouddrove-ci" 14 | # Add reviewer 15 | reviewers: 16 | - "approvers" 17 | - package-ecosystem: "terraform" # See documentation for possible values 18 | directory: "examples/aws_managed" # Location of package manifests 19 | schedule: 20 | interval: "weekly" 21 | # Add assignees 22 | assignees: 23 | - "clouddrove-ci" 24 | # Add reviewer 25 | reviewers: 26 | - "approvers" 27 | - package-ecosystem: "terraform" # See documentation for possible values 28 | directory: "examples/complete" # Location of package manifests 29 | schedule: 30 | interval: "weekly" 31 | # Add assignees 32 | assignees: 33 | - "clouddrove-ci" 34 | # Add reviewer 35 | reviewers: 36 | - "approvers" 37 | - package-ecosystem: "terraform" # See documentation for possible values 38 | directory: "examples/self_managed" # Location of package manifests 39 | schedule: 40 | interval: "weekly" 41 | # Add assignees 42 | assignees: 43 | - "clouddrove-ci" 44 | # Add reviewer 45 | reviewers: 46 | - "approvers" 47 | -------------------------------------------------------------------------------- /.github/workflows/auto_assignee.yml: -------------------------------------------------------------------------------- 1 | name: Auto Assign PRs 2 | 3 | on: 4 | pull_request: 5 | types: [opened, reopened] 6 | 7 | workflow_dispatch: 8 | jobs: 9 | assignee: 10 | uses: clouddrove/github-shared-workflows/.github/workflows/auto_assignee.yml@master 11 | secrets: 12 | GITHUB: ${{ secrets.GITHUB }} 13 | with: 14 | assignees: 'clouddrove-ci' 15 | -------------------------------------------------------------------------------- /.github/workflows/automerge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Auto merge 3 | on: 4 | pull_request: 5 | jobs: 6 | auto-merge: 7 | uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@master 8 | secrets: 9 | GITHUB: ${{ secrets.GITHUB }} 10 | with: 11 | tfcheck: 'tf-checks-aws-managed-example / Check code format' 12 | ... 
13 | -------------------------------------------------------------------------------- /.github/workflows/changelog.yml: -------------------------------------------------------------------------------- 1 | name: changelog 2 | permissions: write-all 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | workflow_dispatch: 8 | jobs: 9 | changelog: 10 | uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@master 11 | secrets: inherit 12 | with: 13 | branch: 'master' 14 | -------------------------------------------------------------------------------- /.github/workflows/readme.yml: -------------------------------------------------------------------------------- 1 | name: Readme Workflow 2 | on: 3 | push: 4 | branches: 5 | - master 6 | paths-ignore: 7 | - 'README.md' 8 | - 'docs/**' 9 | workflow_dispatch: 10 | jobs: 11 | README: 12 | uses: clouddrove/github-shared-workflows/.github/workflows/readme.yml@master 13 | secrets: 14 | TOKEN : ${{ secrets.GITHUB }} 15 | SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} -------------------------------------------------------------------------------- /.github/workflows/tf-checks.yml: -------------------------------------------------------------------------------- 1 | name: tf-checks 2 | on: 3 | push: 4 | branches: [ master ] 5 | pull_request: 6 | workflow_dispatch: 7 | jobs: 8 | tf-checks-aws-managed-example: 9 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master 10 | with: 11 | working_directory: './examples/aws_managed/' 12 | tf-checks-complete-example: 13 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master 14 | with: 15 | working_directory: './examples/complete/' 16 | tf-checks-self-managed-example: 17 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master 18 | with: 19 | working_directory: './examples/self_managed/' -------------------------------------------------------------------------------- /.github/workflows/tflint.yml: -------------------------------------------------------------------------------- 1 | name: tf-lint 2 | on: 3 | push: 4 | branches: [ master ] 5 | pull_request: 6 | workflow_dispatch: 7 | jobs: 8 | tf-lint: 9 | uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@master 10 | secrets: 11 | GITHUB: ${{ secrets.GITHUB }} 12 | -------------------------------------------------------------------------------- /.github/workflows/tfsec.yml: -------------------------------------------------------------------------------- 1 | name: tfsec 2 | permissions: write-all 3 | on: 4 | pull_request: 5 | workflow_dispatch: 6 | jobs: 7 | tfsec: 8 | uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@master 9 | secrets: inherit 10 | with: 11 | working_directory: '.' 
12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | eks-admin-cluster-role-binding.yaml 2 | eks-admin-service-account.yaml 3 | config-map-aws-auth*.yaml 4 | kubeconfig_* 5 | .idea 6 | 7 | ################################################################# 8 | # Default .gitignore content for all terraform-aws-modules below 9 | ################################################################# 10 | 11 | .DS_Store 12 | 13 | # Local .terraform directories 14 | **/.terraform/* 15 | 16 | # Terraform lockfile 17 | .terraform.lock.hcl 18 | 19 | # .tfstate files 20 | *.tfstate 21 | *.tfstate.* 22 | *.tfplan 23 | 24 | # Crash log files 25 | crash.log 26 | 27 | # Exclude all .tfvars files, which are likely to contain sentitive data, such as 28 | # password, private keys, and other secrets. These should not be part of version 29 | # control as they are data points which are potentially sensitive and subject 30 | # to change depending on the environment. 31 | *.tfvars 32 | 33 | # Ignore override files as they are usually used to override resources locally and so 34 | # are not checked in 35 | override.tf 36 | override.tf.json 37 | *_override.tf 38 | *_override.tf.json 39 | 40 | # Ignore CLI configuration files 41 | .terraformrc 42 | terraform.rc 43 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.58.0 4 | hooks: 5 | - id: terraform_fmt 6 | # - id: terraform_validate 7 | - id: terraform_docs 8 | args: 9 | - '--args=--lockfile=false' 10 | - id: terraform_tflint 11 | args: 12 | - '--args=--only=terraform_deprecated_interpolation' 13 | - '--args=--only=terraform_deprecated_index' 14 | - '--args=--only=terraform_unused_declarations' 15 | - '--args=--only=terraform_comment_syntax' 16 | - '--args=--only=terraform_documented_outputs' 17 | - '--args=--only=terraform_documented_variables' 18 | - '--args=--only=terraform_typed_variables' 19 | - '--args=--only=terraform_module_pinned_source' 20 | # - '--args=--only=terraform_naming_convention' 21 | - '--args=--only=terraform_required_version' 22 | - '--args=--only=terraform_required_providers' 23 | - '--args=--only=terraform_standard_module_structure' 24 | - '--args=--only=terraform_workspace_remote' 25 | - repo: https://github.com/pre-commit/pre-commit-hooks 26 | rev: v4.0.1 27 | hooks: 28 | - id: check-merge-conflict 29 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
6 | 7 | ## [1.4.3] - 2025-04-04 8 | ### :sparkles: New Features 9 | - [`2e163bb`](https://github.com/clouddrove/terraform-aws-eks/commit/2e163bb2caf96ee03ddf8d9ec38c580844f0bf65) - custom NodeGroup names without environment prefix *(PR [#69](https://github.com/clouddrove/terraform-aws-eks/pull/69) by [@Arzianghanchi](https://github.com/Arzianghanchi))* 10 | - [`5dbcb0e`](https://github.com/clouddrove/terraform-aws-eks/commit/5dbcb0e2182ee9cd151d2208c1e9c2c535527ea7) - updated branch name in uses of workflow *(PR [#75](https://github.com/clouddrove/terraform-aws-eks/pull/75) by [@clouddrove-ci](https://github.com/clouddrove-ci))* 11 | 12 | ### :memo: Documentation Changes 13 | - [`a4d89bd`](https://github.com/clouddrove/terraform-aws-eks/commit/a4d89bd9d2fbb3fee77e8986eb4c40b701410790) - update CHANGELOG.md for 1.4.2 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 14 | 15 | 16 | ## [1.4.2] - 2024-09-04 17 | ### :sparkles: New Features 18 | - [`fa4ad11`](https://github.com/clouddrove/terraform-aws-eks/commit/fa4ad11ba153ee8c652943908999a1f4ee4ea30a) - updated branch name in uses of workflow *(PR [#65](https://github.com/clouddrove/terraform-aws-eks/pull/65) by [@rakeshclouddevops](https://github.com/rakeshclouddevops))* 19 | 20 | ### :bug: Bug Fixes 21 | - [`a48263e`](https://github.com/clouddrove/terraform-aws-eks/commit/a48263e285534befc17e6556bcf042688dccab00) - fix data block, data block was calling before eks cluster creation *(PR [#66](https://github.com/clouddrove/terraform-aws-eks/pull/66) by [@nileshgadgi](https://github.com/nileshgadgi))* 22 | 23 | ### :memo: Documentation Changes 24 | - [`afab46b`](https://github.com/clouddrove/terraform-aws-eks/commit/afab46b2a83c4dd72d9a940881cc2cb5aa4a82bb) - update CHANGELOG.md for 1.4.1 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 25 | 26 | 27 | ## [1.4.1] - 2024-05-07 28 | ### :sparkles: New Features 29 | - [`965397c`](https://github.com/clouddrove/terraform-aws-eks/commit/965397c8d9fbe80d079dc4134b028b16c60da607) - update github-action version and added automerge file *(PR [#61](https://github.com/clouddrove/terraform-aws-eks/pull/61) by [@theprashantyadav](https://github.com/theprashantyadav))* 30 | - [`cfd2b41`](https://github.com/clouddrove/terraform-aws-eks/commit/cfd2b411629688901588c768c59c93be8447b773) - updated example path and readme paramters *(commit by [@Tanveer143s](https://github.com/Tanveer143s))* 31 | 32 | ### :bug: Bug Fixes 33 | - [`5268f7c`](https://github.com/clouddrove/terraform-aws-eks/commit/5268f7ca95d02aa1639fa8a4a6f1af836ab95973) - Update kubernetes provider name and tag. 
*(PR [#64](https://github.com/clouddrove/terraform-aws-eks/pull/64) by [@nileshgadgi](https://github.com/nileshgadgi))* 34 | 35 | ### :memo: Documentation Changes 36 | - [`9824ae1`](https://github.com/clouddrove/terraform-aws-eks/commit/9824ae1dff440241a1d975b866795d27b000e444) - update CHANGELOG.md for 1.4.0 *(commit by [@clouddrove-ci](https://github.com/clouddrove-ci))* 37 | 38 | 39 | ## [1.4.0] - 2023-09-18 40 | ### :sparkles: New Features 41 | - [`416b3a6`](https://github.com/clouddrove/terraform-aws-eks/commit/416b3a69851bd662faa42ddda561331df3f12c11) - added default eks addons *(commit by [@h1manshu98](https://github.com/h1manshu98))* 42 | - [`4ee24c4`](https://github.com/clouddrove/terraform-aws-eks/commit/4ee24c44638bf4f33a970c2a0605e383aac19f96) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 43 | - [`a63cc9a`](https://github.com/clouddrove/terraform-aws-eks/commit/a63cc9a42ff60c4e969586aea916446c4d73d3e7) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 44 | - [`0854828`](https://github.com/clouddrove/terraform-aws-eks/commit/08548281013efceb2bc58ecfa2b8b7f735bd76dc) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 45 | - [`9cc2ba8`](https://github.com/clouddrove/terraform-aws-eks/commit/9cc2ba84d7c38127049c92f360e48ff2aa9e19dc) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 46 | - [`114b2b4`](https://github.com/clouddrove/terraform-aws-eks/commit/114b2b4d90ac37ac20587f7e0c6182332d10af76) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 47 | - [`b707e3e`](https://github.com/clouddrove/terraform-aws-eks/commit/b707e3e9a376171feff3a8fe5dca69eef0d59b0a) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 48 | - [`cbeab87`](https://github.com/clouddrove/terraform-aws-eks/commit/cbeab870f2456b60e952f75fdac208b95fb1fcf8) - added default eks addons *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 49 | - [`abe8d90`](https://github.com/clouddrove/terraform-aws-eks/commit/abe8d90fd1138ac841fed3bf35b878f0e1012435) - fargate profile added *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 50 | - [`1e4c37a`](https://github.com/clouddrove/terraform-aws-eks/commit/1e4c37abddbecd6f87337c1700f77df852ea5c2f) - fargate profile added *(commit by [@anmolnagpal](https://github.com/anmolnagpal))* 51 | - [`25c9650`](https://github.com/clouddrove/terraform-aws-eks/commit/25c9650645ce130ba13f95cf9ba89850fc7f98ce) - default variable removed *(commit by [@d4kverma](https://github.com/d4kverma))* 52 | - [`8afa1d5`](https://github.com/clouddrove/terraform-aws-eks/commit/8afa1d543e7adf82601565d06445cd6d3e95eea6) - version fixed *(commit by [@d4kverma](https://github.com/d4kverma))* 53 | - [`71b27cd`](https://github.com/clouddrove/terraform-aws-eks/commit/71b27cd7af357fb07b81f665a46a29daa1d465cf) - version fixed *(commit by [@d4kverma](https://github.com/d4kverma))* 54 | - [`9b4604d`](https://github.com/clouddrove/terraform-aws-eks/commit/9b4604d303fdc9a8d365dcb262bd57a35bac8349) - additional tags for public and private subnets *(PR [#58](https://github.com/clouddrove/terraform-aws-eks/pull/58) by [@h1manshu98](https://github.com/h1manshu98))* 55 | 56 | ### :bug: Bug Fixes 57 | - [`24b6c49`](https://github.com/clouddrove/terraform-aws-eks/commit/24b6c493f79176998d4073325feaed7313e15f6e) - Enabled key rotation in fargate example *(commit by 
[@13archit](https://github.com/13archit))* 58 | - [`10c3a9b`](https://github.com/clouddrove/terraform-aws-eks/commit/10c3a9b32e46a427568399ac9d6a38528d054eee) - Fixed tfsec ignore *(commit by [@13archit](https://github.com/13archit))* 59 | - [`3ea65e5`](https://github.com/clouddrove/terraform-aws-eks/commit/3ea65e562627f93eb4b13f458c59e3b7c9331e76) - Added tfsec ignore *(commit by [@13archit](https://github.com/13archit))* 60 | - [`1bbff08`](https://github.com/clouddrove/terraform-aws-eks/commit/1bbff08dc43595c328337e27b3c207948dea3a6f) - fix tflint workflow. *(commit by [@13archit](https://github.com/13archit))* 61 | - [`72abff5`](https://github.com/clouddrove/terraform-aws-eks/commit/72abff5743e388fd635f3b25e4b1da97bd7c0e9a) - removed keypair module *(commit by [@h1manshu98](https://github.com/h1manshu98))* 62 | - [`eef6961`](https://github.com/clouddrove/terraform-aws-eks/commit/eef69618d577be864c5d0a1624448df54fc0f7bd) - removed keypair module *(commit by [@h1manshu98](https://github.com/h1manshu98))* 63 | - [`3c6b476`](https://github.com/clouddrove/terraform-aws-eks/commit/3c6b4760d91280824075588215a1270cf6cd67ea) - removed keypair module *(commit by [@h1manshu98](https://github.com/h1manshu98))* 64 | 65 | 66 | ## [0.15.2] - 2022-07-05 67 | 68 | ## [1.0.1] - 2022-07-29 69 | 70 | ## [0.12.9.2] - 2022-04-26 71 | 72 | ## [1.0.0] - 2022-03-30 73 | 74 | ## [0.15.1] - 2021-12-10 75 | 76 | ## [0.15.0.1] - 2021-11-11 77 | 78 | ## [0.12.10.1] - 2021-09-03 79 | 80 | ## [0.12.13.1] - 2021-07-22 81 | 82 | ## [0.12.9.1] - 2021-07-22 83 | 84 | ## [0.15.0] - 2021-06-30 85 | 86 | ## [0.12.6.1] - 2021-03-25 87 | 88 | ## [0.13.0] - 2020-11-03 89 | 90 | ## [0.12.13] - 2020-11-02 91 | 92 | ## [0.12.12] - 2020-11-02 93 | 94 | ## [0.12.11] - 2020-09-29 95 | 96 | ## [0.12.10] - 2020-09-08 97 | 98 | ## [0.12.9] - 2020-07-15 99 | 100 | ## [0.12.8] - 2020-07-14 101 | 102 | ## [0.12.7] - 2020-07-02 103 | 104 | ## [0.12.6] - 2020-05-24 105 | 106 | ## [0.12.5] - 2020-03-05 107 | 108 | ## [0.12.4] - 2019-12-30 109 | 110 | ## [0.12.3] - 2019-12-05 111 | 112 | ## [0.12.2] - 2019-12-02 113 | 114 | ## [0.12.0] - 2019-11-08 115 | 116 | 117 | [0.12.0]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.0...master 118 | [0.12.2]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.2...master 119 | [0.12.3]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.3...master 120 | [0.12.4]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.4...master 121 | [0.12.5]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.5...master 122 | [0.12.6]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.6 123 | [0.12.7]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.7...master 124 | [0.12.8]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.8 125 | [0.12.9]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.9...master 126 | [0.12.10]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.10...master 127 | [0.12.11]: https://github.com/clouddrove/terraform-aws-eks/compare/0.12.11...master 128 | [0.12.12]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.12 129 | [0.12.13]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.13 130 | [0.13.0]: https://github.com/clouddrove/terraform-aws-eks/compare/0.13.0...master 131 | [0.12.6.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.6.1 132 | [0.15.0]: https://github.com/clouddrove/terraform-aws-eks/compare/0.15.0...master 133 | 
[0.12.9.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.9.1 134 | [0.12.13.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.13.1 135 | [0.12.10.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.10.1 136 | [0.15.0.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.15.0.1 137 | [0.15.1]: https://github.com/clouddrove/terraform-aws-eks/compare/0.15.1...master 138 | [1.0.0]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/1.0.0 139 | [0.12.9.2]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.12.9.2 140 | [1.0.1]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/1.0.1 141 | [0.15.2]: https://github.com/clouddrove/terraform-aws-eks/releases/tag/0.15.2 142 | [1.4.0]: https://github.com/clouddrove/terraform-aws-eks/compare/1.3.0...1.4.0 143 | [1.4.1]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.0...1.4.1 144 | [1.4.2]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.1...1.4.2 145 | [1.4.3]: https://github.com/clouddrove/terraform-aws-eks/compare/1.4.2...1.4.3 146 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Cloud Drove 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export GENIE_PATH ?= $(shell 'pwd')/../../../genie 2 | include $(GENIE_PATH)/Makefile 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [![Banner](https://github.com/clouddrove/terraform-module-template/assets/119565952/67a8a1af-2eb7-40b7-ae07-c94cde9ce062)][website] 3 |

4 | Terraform AWS EKS 5 |

6 | 7 |

8 | With our comprehensive DevOps toolkit, streamline operations, automate workflows, enhance collaboration and, most importantly, deploy with confidence. 9 |

10 | 11 | 12 |

13 | 14 | 15 | Terraform 16 | 17 | 18 | tfsec 19 | 20 | 21 | Licence 22 | 23 | 24 | Changelog 25 | 26 | 27 | 28 |

29 |

30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |

45 |
46 | 47 | 48 | We are a group of DevOps engineers & architects, joining hands in this ever-evolving digital landscape. With our strong belief in Automation, we are always on the lookout to split the infrastructure, just like microservices, into smaller connected resources (database, cluster and more) that can be standardized, are manageable, scalable, secure & follow industry best practices. 49 | 50 | 51 | This module includes Terraform open source, examples, and automation tests (for better understanding), which will help you create and improve your infrastructure with minimalistic coding. 52 | 53 | 54 | 55 | 56 | ## Prerequisites and Providers 57 | 58 | This table contains both Prerequisites and Providers: 59 | 60 | | Description | Name | Version | 61 | |:-------------:|:-------------------------------------------:|:---------:| 62 | | **Prerequisite** | [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) | >= 1.5.4 | 63 | | **Provider** | [aws](https://aws.amazon.com/) | >= 5.11.0 | 64 | 65 | 66 | 67 | 68 | 69 | ## Examples 70 | 71 | **IMPORTANT:** Since the master branch used in source varies based on new modifications, we recommend using the [release versions](https://github.com/clouddrove/terraform-aws-eks/releases). 72 | 73 | 📌 For additional usage examples, check the complete list in the [`examples/`](./examples) directory. 74 | 75 | 76 | 77 | ## Inputs and Outputs 78 | 79 | Refer to the complete documentation: [here](docs/io.md) 80 | 81 | 82 | 93 | 94 | 95 | ## Module Dependencies 96 | 97 | This module has dependencies on: 98 | - [Labels Module](https://github.com/clouddrove/terraform-aws-labels): Provides resource tagging. 99 | 100 | 101 | ## 📑 Changelog 102 | 103 | Refer [here](CHANGELOG.md). 104 | 105 | 106 | 107 | 108 | ## ✨ Contributors 109 | 110 | Big thanks to our contributors for elevating our project with their dedication and expertise! But we do not wish to stop there; we would like to invite contributions from the community in improving these projects and making them more versatile for better reach. Remember, every bit of contribution is immensely valuable, as, together, we are moving in only one direction, i.e. forward. 111 | 112 | 113 | 114 | 115 |
116 |
117 | 118 | If you're considering contributing to our project, here are a few quick guidelines that we have been following (Got a suggestion? We are all ears!): 119 | 120 | - **Fork the Repository:** Create a new branch for your feature or bug fix. 121 | - **Coding Standards:** You know the drill. 122 | - **Clear Commit Messages:** Write clear and concise commit messages to facilitate understanding. 123 | - **Thorough Testing:** Test your changes thoroughly before submitting a pull request. 124 | - **Documentation Updates:** Include relevant documentation updates if your changes impact it. 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | ## Feedback 139 | Spot a bug or have thoughts to share with us? Let's squash it together! Log it in our [issue tracker](https://github.com/clouddrove/terraform-aws-eks/issues), feel free to drop us an email at [hello@clouddrove.com](mailto:hello@clouddrove.com). 140 | 141 | Show some love with a ★ on [our GitHub](https://github.com/clouddrove/terraform-aws-eks)! if our work has brightened your day! – your feedback fuels our journey! 142 | 143 | 144 | ## :rocket: Our Accomplishment 145 | 146 | We have [*100+ Terraform modules*][terraform_modules] 🙌. You could consider them finished, but, with enthusiasts like yourself, we are able to ever improve them, so we call our status - improvement in progress. 147 | 148 | - [Terraform Module Registry:](https://registry.terraform.io/namespaces/clouddrove) Discover our Terraform modules here. 149 | 150 | - [Terraform Modules for AWS/Azure Modules:](https://github.com/clouddrove/toc) Explore our comprehensive Table of Contents for easy navigation through our documentation for modules pertaining to AWS, Azure & GCP. 151 | 152 | - [Terraform Modules for Digital Ocean:](https://github.com/terraform-do-modules/toc) Check out our specialized Terraform modules for Digital Ocean. 153 | 154 | 155 | 156 | 157 | ## Join Our Slack Community 158 | 159 | Join our vibrant open-source slack community and embark on an ever-evolving journey with CloudDrove; helping you in moving upwards in your career path. 160 | Join our vibrant Open Source Slack Community and embark on a learning journey with CloudDrove. Grow with us in the world of DevOps and set your career on a path of consistency. 161 | 162 | 🌐💬What you'll get after joining this Slack community: 163 | 164 | - 🚀 Encouragement to upgrade your best version. 165 | - 🌈 Learning companionship with our DevOps squad. 166 | - 🌱 Relentless growth with daily updates on new advancements in technologies. 167 | 168 | Join our tech elites [Join Now][slack] 🚀 169 | 170 | 171 | ## Explore Our Blogs 172 | 173 | Click [here][blog] :books: :star2: 174 | 175 | ## Tap into our capabilities 176 | We provide a platform for organizations to engage with experienced top-tier DevOps & Cloud services. Tap into our pool of certified engineers and architects to elevate your DevOps and Cloud Solutions. 177 | 178 | At [CloudDrove][website], has extensive experience in designing, building & migrating environments, securing, consulting, monitoring, optimizing, automating, and maintaining complex and large modern systems. With remarkable client footprints in American & European corridors, our certified architects & engineers are ready to serve you as per your requirements & schedule. Write to us at [business@clouddrove.com](mailto:business@clouddrove.com). 179 | 180 |

We are The Cloud Experts!

181 |
182 |

We ❤️ Open Source and you can check out our other modules to get help with your new Cloud ideas.

183 | 184 | [website]: https://clouddrove.com 185 | [blog]: https://blog.clouddrove.com 186 | [slack]: https://www.launchpass.com/devops-talks 187 | [github]: https://github.com/clouddrove 188 | [linkedin]: https://cpco.io/linkedin 189 | [twitter]: https://twitter.com/clouddrove/ 190 | [email]: https://clouddrove.com/contact-us.html 191 | [terraform_modules]: https://github.com/clouddrove?utf8=%E2%9C%93&q=terraform-&type=&language= 192 | -------------------------------------------------------------------------------- /README.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # This is the canonical configuration for the `README.md` 4 | # Run `make readme` to rebuild the `README.md` 5 | # 6 | 7 | # Name of this project 8 | name: Terraform AWS EKS 9 | 10 | # License of this project 11 | license: "MIT" 12 | 13 | # Canonical GitHub repo 14 | github_repo: clouddrove/terraform-aws-eks 15 | 16 | # Badges to display 17 | badges: 18 | - name: "Terraform" 19 | image: "https://img.shields.io/badge/Terraform-v0.13-green" 20 | url: "https://www.terraform.io" 21 | - name: "tfsec" 22 | image: "https://github.com/clouddrove/terraform-aws-eks/actions/workflows/tfsec.yml/badge.svg" 23 | url: "https://github.com/clouddrove/terraform-aws-eks/actions/workflows/tfsec.yml" 24 | - name: "Licence" 25 | image: "https://img.shields.io/badge/License-APACHE-blue.svg" 26 | url: "LICENSE.md" 27 | - name: "Changelog" 28 | image: "https://img.shields.io/badge/Changelog-blue" 29 | url: "CHANGELOG.md" 30 | 31 | # Prerequesties to display 32 | prerequesties: 33 | - name: Terraform 34 | url: https://learn.hashicorp.com/terraform/getting-started/install.html 35 | version: ">= 1.5.4" 36 | 37 | providers: 38 | - name: aws 39 | url: https://aws.amazon.com/ 40 | version: ">= 5.11.0" 41 | 42 | module_dependencies: 43 | - name: Labels Module 44 | url: https://github.com/clouddrove/terraform-aws-labels 45 | description: Provides resource tagging. 46 | 47 | # description of this project 48 | description: |- 49 | Terraform module that creates Autoscaling, Workers, EKS, and Node Groups.
50 | 51 | # How to use this project 52 | usage : |- 53 | ### Sample example 54 | Here is an example of how you can use this module in your inventory structure: 55 | ```hcl 56 | module "eks" { 57 | source = "clouddrove/eks/aws" 58 | version = "1.0.1" 59 | 60 | name = "eks" 61 | environment = "test" 62 | label_order = ["environment", "name"] 63 | enabled = true 64 | 65 | kubernetes_version = "1.25" 66 | endpoint_private_access = true 67 | endpoint_public_access = true 68 | enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] 69 | oidc_provider_enabled = true 70 | 71 | # Network 72 | vpc_id = module.vpc.vpc_id 73 | subnet_ids = module.subnets.private_subnet_id 74 | allowed_security_groups = [module.ssh.security_group_ids] 75 | allowed_cidr_blocks = ["0.0.0.0/0"] 76 | 77 | # Node Groups Defaults Values It will Work all Node Groups 78 | self_node_group_defaults = { 79 | subnet_ids = module.subnets.private_subnet_id 80 | key_name = module.keypair.name 81 | propagate_tags = [{ 82 | key = "aws-node-termination-handler/managed" 83 | value = true 84 | propagate_at_launch = true 85 | }, 86 | { 87 | key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" 88 | value = "owned" 89 | propagate_at_launch = true 90 | } 91 | ] 92 | 93 | block_device_mappings = { 94 | xvda = { 95 | device_name = "/dev/xvda" 96 | ebs = { 97 | volume_size = 50 98 | volume_type = "gp3" 99 | iops = 3000 100 | throughput = 150 101 | } 102 | } 103 | } 104 | } 105 | 106 | 107 | self_node_groups = { 108 | tools = { 109 | name = "tools" 110 | min_size = 1 111 | max_size = 7 112 | desired_size = 2 113 | bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" 114 | instance_type = "t3a.medium" 115 | } 116 | 117 | spot = { 118 | name = "spot" 119 | instance_market_options = { 120 | market_type = "spot" 121 | } 122 | min_size = 1 123 | max_size = 7 124 | desired_size = 1 125 | bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" 126 | instance_type = "m5.large" 127 | } 128 | } 129 | 130 | # Schdule self Managed Auto Scaling node group 131 | schedules = { 132 | scale-up = { 133 | min_size = 2 134 | max_size = 2 # Retains current max size 135 | desired_size = 2 136 | start_time = "2023-05-15T19:00:00Z" 137 | end_time = "2023-05-19T19:00:00Z" 138 | timezone = "Europe/Amsterdam" 139 | recurrence = "0 7 * * 1" 140 | }, 141 | scale-down = { 142 | min_size = 0 143 | max_size = 0 # Retains current max size 144 | desired_size = 0 145 | start_time = "2023-05-12T12:00:00Z" 146 | end_time = "2024-03-05T12:00:00Z" 147 | timezone = "Europe/Amsterdam" 148 | recurrence = "0 7 * * 5" 149 | } 150 | } 151 | 152 | # Node Groups Defaults Values It will Work all Node Groups 153 | managed_node_group_defaults = { 154 | subnet_ids = module.subnets.private_subnet_id 155 | key_name = module.keypair.name 156 | nodes_additional_security_group_ids = [module.ssh.security_group_ids] 157 | tags = { 158 | Example = "test" 159 | } 160 | 161 | block_device_mappings = { 162 | xvda = { 163 | device_name = "/dev/xvda" 164 | ebs = { 165 | volume_size = 50 166 | volume_type = "gp3" 167 | iops = 3000 168 | throughput = 150 169 | } 170 | } 171 | } 172 | } 173 | 174 | managed_node_group = { 175 | test = { 176 | min_size = 1 177 | max_size = 7 178 | desired_size = 2 179 | instance_types = ["t3a.medium"] 180 | } 181 | 182 | spot = { 183 | name = "spot" 184 | capacity_type = "SPOT" 185 | 186 | min_size = 1 187 | max_size = 7 188 | desired_size = 1 189 | 
force_update_version = true 190 | instance_types = ["t3.medium", "t3a.medium"] 191 | } 192 | } 193 | 194 | apply_config_map_aws_auth = true 195 | map_additional_iam_users = [ 196 | { 197 | userarn = "arn:aws:iam::xxxxxx:user/nikita@clouddrove.com" 198 | username = "nikita@clouddrove.com" 199 | groups = ["system:masters"] 200 | }, 201 | { 202 | userarn = "arn:aws:iam::xxxxxx:user/sohan@clouddrove.com" 203 | username = "sohan@clouddrove.com" 204 | groups = ["system:masters"] 205 | } 206 | ] 207 | # Schdule EKS Managed Auto Scaling node group 208 | schedules = { 209 | scale-up = { 210 | min_size = 2 211 | max_size = 2 # Retains current max size 212 | desired_size = 2 213 | start_time = "2023-05-15T19:00:00Z" 214 | end_time = "2023-05-19T19:00:00Z" 215 | timezone = "Europe/Amsterdam" 216 | recurrence = "0 7 * * 1" 217 | }, 218 | scale-down = { 219 | min_size = 0 220 | max_size = 0 # Retains current max size 221 | desired_size = 0 222 | start_time = "2023-05-12T12:00:00Z" 223 | end_time = "2024-03-05T12:00:00Z" 224 | timezone = "Europe/Amsterdam" 225 | recurrence = "0 7 * * 5" 226 | } 227 | } 228 | } 229 | ``` 230 | -------------------------------------------------------------------------------- /aws_auth.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | # The EKS service does not provide a cluster-level API parameter or resource to automatically configure the underlying Kubernetes cluster 4 | # to allow worker nodes to join the cluster via AWS IAM role authentication. 5 | 6 | # NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster), 7 | # the requirements outlined here must be met: 8 | # https://learn.hashicorp.com/terraform/aws/eks-intro#preparation 9 | # https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks 10 | # https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes 11 | 12 | # Additional links 13 | # https://learn.hashicorp.com/terraform/aws/eks-intro 14 | # https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b 15 | # https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html 16 | # https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html 17 | # https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html 18 | # https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html 19 | # https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb 20 | # http://marcinkaszynski.com/2018/07/12/eks-auth.html 21 | # https://cloud.google.com/kubernetes-engine/docs/concepts/configmap 22 | # http://yaml-multiline.info 23 | # https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216 24 | # https://www.terraform.io/docs/cloud/run/install-software.html 25 | # https://stackoverflow.com/questions/26123740/is-it-possible-to-install-aws-cli-package-without-root-permission 26 | # https://stackoverflow.com/questions/58232731/kubectl-missing-form-terraform-cloud 27 | # https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html 28 | # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html 29 | 30 | 31 | locals { 32 | certificate_authority_data_list = coalescelist(aws_eks_cluster.default.*.certificate_authority, [[{ data : "" }]]) 33 | certificate_authority_data_list_internal = local.certificate_authority_data_list[0] 34 | certificate_authority_data_map = 
local.certificate_authority_data_list_internal[0] 35 | certificate_authority_data = local.certificate_authority_data_map["data"] 36 | 37 | # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap 38 | # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically 39 | map_worker_roles = [ 40 | { 41 | rolearn : aws_iam_role.node_groups.0.arn 42 | username : "system:node:{{EC2PrivateDNSName}}" 43 | groups : [ 44 | "system:bootstrappers", 45 | "system:nodes" 46 | ] 47 | } 48 | ] 49 | } 50 | 51 | data "template_file" "kubeconfig" { 52 | count = var.enabled ? 1 : 0 53 | template = file("${path.module}/kubeconfig.tpl") 54 | 55 | vars = { 56 | server = aws_eks_cluster.default[0].endpoint 57 | certificate_authority_data = local.certificate_authority_data 58 | cluster_name = module.labels.id 59 | } 60 | } 61 | 62 | resource "null_resource" "wait_for_cluster" { 63 | count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 64 | depends_on = [aws_eks_cluster.default[0]] 65 | 66 | provisioner "local-exec" { 67 | command = var.wait_for_cluster_command 68 | interpreter = var.local_exec_interpreter 69 | environment = { 70 | ENDPOINT = aws_eks_cluster.default[0].endpoint 71 | } 72 | } 73 | } 74 | 75 | data "aws_eks_cluster" "eks" { 76 | count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 77 | name = aws_eks_cluster.default[0].id 78 | } 79 | 80 | # Get an authentication token to communicate with the EKS cluster. 81 | # By default (before other roles are added to the Auth ConfigMap), you can authenticate to EKS cluster only by assuming the role that created the cluster. 82 | # `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token. 83 | # If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token. 84 | # https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html 85 | data "aws_eks_cluster_auth" "eks" { 86 | count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0 87 | name = aws_eks_cluster.default[0].id 88 | } 89 | 90 | provider "kubernetes" { 91 | token = data.aws_eks_cluster_auth.eks[0].token 92 | host = data.aws_eks_cluster.eks[0].endpoint 93 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks[0].certificate_authority.0.data) 94 | } 95 | 96 | resource "kubernetes_config_map" "aws_auth_ignore_changes" { 97 | count = var.enabled && var.apply_config_map_aws_auth ? 
1 : 0 98 | depends_on = [null_resource.wait_for_cluster[0]] 99 | 100 | metadata { 101 | name = "aws-auth" 102 | namespace = "kube-system" 103 | } 104 | 105 | data = { 106 | mapRoles = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))) 107 | mapUsers = yamlencode(var.map_additional_iam_users) 108 | mapAccounts = yamlencode(var.map_additional_aws_accounts) 109 | } 110 | 111 | lifecycle { 112 | ignore_changes = [data["mapRoles"]] 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /aws_node_groups.tf: -------------------------------------------------------------------------------- 1 | module "eks_managed_node_group" { 2 | source = "./node_group/aws_managed" 3 | 4 | for_each = { for k, v in var.managed_node_group : k => v if var.enabled } 5 | 6 | enabled = try(each.value.enabled, true) 7 | 8 | cluster_name = aws_eks_cluster.default[0].name 9 | cluster_version = var.kubernetes_version 10 | vpc_security_group_ids = compact( 11 | concat( 12 | aws_security_group.node_group.*.id, 13 | aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id, 14 | var.nodes_additional_security_group_ids 15 | 16 | ) 17 | ) 18 | # EKS Managed Node Group 19 | name = try(each.value.name, each.key) 20 | environment = try(each.value.name != "" ? "" : var.environment, var.environment) 21 | repository = var.repository 22 | subnet_ids = try(each.value.subnet_ids, var.managed_node_group_defaults.subnet_ids, var.subnet_ids) 23 | 24 | min_size = try(each.value.min_size, var.managed_node_group_defaults.min_size, 1) 25 | max_size = try(each.value.max_size, var.managed_node_group_defaults.max_size, 3) 26 | desired_size = try(each.value.desired_size, var.managed_node_group_defaults.desired_size, 1) 27 | 28 | ami_id = try(each.value.ami_id, var.managed_node_group_defaults.ami_id, "") 29 | ami_type = try(each.value.ami_type, var.managed_node_group_defaults.ami_type, null) 30 | ami_release_version = try(each.value.ami_release_version, var.managed_node_group_defaults.ami_release_version, null) 31 | 32 | capacity_type = try(each.value.capacity_type, var.managed_node_group_defaults.capacity_type, null) 33 | disk_size = try(each.value.disk_size, var.managed_node_group_defaults.disk_size, null) 34 | force_update_version = try(each.value.force_update_version, var.managed_node_group_defaults.force_update_version, null) 35 | instance_types = try(each.value.instance_types, var.managed_node_group_defaults.instance_types, null) 36 | labels = try(each.value.labels, var.managed_node_group_defaults.labels, null) 37 | 38 | remote_access = try(each.value.remote_access, var.managed_node_group_defaults.remote_access, {}) 39 | taints = try(each.value.taints, var.managed_node_group_defaults.taints, {}) 40 | update_config = try(each.value.update_config, var.managed_node_group_defaults.update_config, {}) 41 | timeouts = try(each.value.timeouts, var.managed_node_group_defaults.timeouts, {}) 42 | 43 | #------------ASG-Schedule-------------------------------------------------- 44 | create_schedule = try(each.value.create_schedule, var.managed_node_group_defaults.create_schedule, true) 45 | schedules = try(each.value.schedules, var.managed_node_group_defaults.schedules, var.schedules) 46 | 47 | # Launch Template 48 | launch_template_description = try(each.value.launch_template_description, var.managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") 49 | launch_template_tags = 
try(each.value.launch_template_tags, var.managed_node_group_defaults.launch_template_tags, {}) 50 | 51 | ebs_optimized = try(each.value.ebs_optimized, var.managed_node_group_defaults.ebs_optimized, null) 52 | key_name = try(each.value.key_name, var.managed_node_group_defaults.key_name, null) 53 | kms_key_id = try(each.value.kms_key_id, var.managed_node_group_defaults.ebs_optimized, null) 54 | 55 | launch_template_default_version = try(each.value.launch_template_default_version, var.managed_node_group_defaults.launch_template_default_version, null) 56 | update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.managed_node_group_defaults.update_launch_template_default_version, true) 57 | disable_api_termination = try(each.value.disable_api_termination, var.managed_node_group_defaults.disable_api_termination, null) 58 | kernel_id = try(each.value.kernel_id, var.managed_node_group_defaults.kernel_id, null) 59 | ram_disk_id = try(each.value.ram_disk_id, var.managed_node_group_defaults.ram_disk_id, null) 60 | 61 | block_device_mappings = try(each.value.block_device_mappings, var.managed_node_group_defaults.block_device_mappings, {}) 62 | capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.managed_node_group_defaults.capacity_reservation_specification, null) 63 | cpu_options = try(each.value.cpu_options, var.managed_node_group_defaults.cpu_options, null) 64 | credit_specification = try(each.value.credit_specification, var.managed_node_group_defaults.credit_specification, null) 65 | elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, var.managed_node_group_defaults.elastic_gpu_specifications, null) 66 | elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.managed_node_group_defaults.elastic_inference_accelerator, null) 67 | enclave_options = try(each.value.enclave_options, var.managed_node_group_defaults.enclave_options, null) 68 | license_specifications = try(each.value.license_specifications, var.managed_node_group_defaults.license_specifications, null) 69 | metadata_options = try(each.value.metadata_options, var.managed_node_group_defaults.metadata_options, local.metadata_options) 70 | enable_monitoring = try(each.value.enable_monitoring, var.managed_node_group_defaults.enable_monitoring, true) 71 | network_interfaces = try(each.value.network_interfaces, var.managed_node_group_defaults.network_interfaces, []) 72 | placement = try(each.value.placement, var.managed_node_group_defaults.placement, null) 73 | 74 | # IAM role 75 | iam_role_arn = aws_iam_role.node_groups[0].arn 76 | 77 | tags = merge(var.tags, try(each.value.tags, var.managed_node_group_defaults.tags, {})) 78 | } 79 | 80 | 81 | 82 | -------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | data "aws_region" "current" {} 4 | 5 | -------------------------------------------------------------------------------- /docs/io.md: -------------------------------------------------------------------------------- 1 | ## Inputs 2 | 3 | | Name | Description | Type | Default | Required | 4 | |------|-------------|------|---------|:--------:| 5 | | addons | Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources. 
| `any` | `[]` | no | 6 | | allowed\_cidr\_blocks | List of CIDR blocks to be allowed to connect to the EKS cluster. | `list(string)` | `[]` | no | 7 | | allowed\_security\_groups | List of Security Group IDs to be allowed to connect to the EKS cluster. | `list(string)` | `[]` | no | 8 | | apply\_config\_map\_aws\_auth | Whether to generate local files from `kubeconfig` and `config_map_aws_auth` and perform `kubectl apply` to apply the ConfigMap to allow the worker nodes to join the EKS cluster. | `bool` | `true` | no | 9 | | attributes | Additional attributes (e.g. `1`). | `list(any)` | `[]` | no | 10 | | cluster\_encryption\_config\_enabled | Set to `true` to enable Cluster Encryption Configuration | `bool` | `true` | no | 11 | | cluster\_encryption\_config\_kms\_key\_deletion\_window\_in\_days | Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction | `number` | `10` | no | 12 | | cluster\_encryption\_config\_kms\_key\_enable\_key\_rotation | Cluster Encryption Config KMS Key Resource argument - enable kms key rotation | `bool` | `true` | no | 13 | | cluster\_encryption\_config\_kms\_key\_policy | Cluster Encryption Config KMS Key Resource argument - key policy | `string` | `null` | no | 14 | | cluster\_encryption\_config\_resources | Cluster Encryption Config Resources to encrypt, e.g. ['secrets'] | `list(any)` |
[ "secrets" ]
| no | 15 | | cluster\_ip\_family | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `null` | no | 16 | | cluster\_log\_retention\_period | Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. | `number` | `30` | no | 17 | | cluster\_service\_ipv4\_cidr | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | 18 | | cluster\_service\_ipv6\_cidr | The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster | `string` | `null` | no | 19 | | cluster\_timeouts | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | 20 | | create\_schedule | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no | 21 | | eks\_additional\_security\_group\_ids | EKS additional security group id | `list(string)` | `[]` | no | 22 | | eks\_tags | Additional tags for EKS Cluster only. | `map(any)` | `{}` | no | 23 | | enabled | Whether to create the resources. Set to `false` to prevent the module from creating any resources. | `bool` | `true` | no | 24 | | enabled\_cluster\_log\_types | A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]. | `list(string)` |
[ "api", "audit", "authenticator", "controllerManager", "scheduler" ]
| no | 25 | | endpoint\_private\_access | Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false. | `bool` | `true` | no | 26 | | endpoint\_public\_access | Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true. | `bool` | `true` | no | 27 | | environment | Environment (e.g. `prod`, `dev`, `staging`). | `string` | `""` | no | 28 | | fargate\_enabled | Whether fargate profile is enabled or not | `bool` | `false` | no | 29 | | fargate\_profiles | The number of Fargate Profiles that would be created. | `map(any)` | `{}` | no | 30 | | iam\_role\_additional\_policies | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | 31 | | kubernetes\_version | Desired Kubernetes master version. If you do not specify a value, the latest available version is used. | `string` | `""` | no | 32 | | label\_order | Label order, e.g. `name`,`application`. | `list(any)` |
[ "name", "environment" ]
| no | 33 | | local\_exec\_interpreter | shell to use for local\_exec | `list(string)` |
[ "/bin/sh", "-c" ]
| no | 34 | | managed\_node\_group | Map of eks-managed node group definitions to create | `any` | `{}` | no | 35 | | managed\_node\_group\_defaults | Map of eks-managed node group definitions to create | `any` | `{}` | no | 36 | | managedby | ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'. | `string` | `"hello@clouddrove.com"` | no | 37 | | map\_additional\_aws\_accounts | Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap | `list(string)` | `[]` | no | 38 | | map\_additional\_iam\_roles | Additional IAM roles to add to `config-map-aws-auth` ConfigMap |
list(object({ rolearn = string, username = string, groups = list(string) }))
| `[]` | no | 39 | | map\_additional\_iam\_users | Additional IAM users to add to `config-map-aws-auth` ConfigMap |
list(object({ userarn = string, username = string, groups = list(string) }))
| `[]` | no | 40 | | name | Name (e.g. `app` or `cluster`). | `string` | `""` | no | 41 | | nodes\_additional\_security\_group\_ids | EKS additional node group ids | `list(string)` | `[]` | no | 42 | | oidc\_provider\_enabled | Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html | `bool` | `true` | no | 43 | | openid\_connect\_audiences | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | 44 | | outpost\_config | Configuration for the AWS Outpost to provision the cluster on | `any` | `{}` | no | 45 | | permissions\_boundary | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no | 46 | | public\_access\_cidrs | Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0. | `list(string)` |
[ "0.0.0.0/0" ]
| no | 47 | | repository | Terraform current module repo | `string` | `"https://github.com/clouddrove/terraform-aws-eks"` | no | 48 | | schedules | Map of autoscaling group schedule to create | `map(any)` | `{}` | no | 49 | | self\_node\_group\_defaults | Map of self-managed node group default configurations | `any` | `{}` | no | 50 | | self\_node\_groups | Map of self-managed node group definitions to create | `any` | `{}` | no | 51 | | subnet\_ids | A list of subnet IDs to launch the cluster in. | `list(string)` | `[]` | no | 52 | | tags | Additional tags (e.g. map(`BusinessUnit`,`XYZ`). | `map(any)` | `{}` | no | 53 | | vpc\_id | VPC ID for the EKS cluster. | `string` | `""` | no | 54 | | vpc\_security\_group\_ids | A list of security group IDs to associate | `list(string)` | `[]` | no | 55 | | wait\_for\_cluster\_command | `local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint are available as environment variable `ENDPOINT` | `string` | `"curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz"` | no | 56 | 57 | ## Outputs 58 | 59 | | Name | Description | 60 | |------|-------------| 61 | | cluster\_arn | The Amazon Resource Name (ARN) of the cluster | 62 | | cluster\_certificate\_authority\_data | Base64 encoded certificate data required to communicate with the cluster | 63 | | cluster\_endpoint | Endpoint for your Kubernetes API server | 64 | | cluster\_iam\_role\_arn | IAM role ARN of the EKS cluster | 65 | | cluster\_iam\_role\_name | IAM role name of the EKS cluster | 66 | | cluster\_iam\_role\_unique\_id | Stable and unique string identifying the IAM role | 67 | | cluster\_id | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready | 68 | | cluster\_name | n/a | 69 | | cluster\_oidc\_issuer\_url | The URL on the EKS cluster for the OpenID Connect identity provider | 70 | | cluster\_platform\_version | Platform version for the cluster | 71 | | cluster\_primary\_security\_group\_id | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use default security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | 72 | | cluster\_status | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | 73 | | node\_group\_iam\_role\_arn | IAM role ARN of the EKS cluster | 74 | | node\_group\_iam\_role\_name | IAM role name of the EKS cluster | 75 | | node\_group\_iam\_role\_unique\_id | Stable and unique string identifying the IAM role | 76 | | node\_security\_group\_arn | Amazon Resource Name (ARN) of the node shared security group | 77 | | node\_security\_group\_id | ID of the node shared security group | 78 | | oidc\_provider\_arn | The ARN of the OIDC Provider if `enable_irsa = true` | 79 | | tags | n/a | 80 | 81 | -------------------------------------------------------------------------------- /examples/aws_managed/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | 5 | locals { 6 | name = "clouddrove-eks" 7 | region = "eu-west-1" 8 | vpc_cidr_block = module.vpc.vpc_cidr_block 9 | additional_cidr_block = "172.16.0.0/16" 10 | environment = "test" 11 | label_order = ["name", "environment"] 12 | tags = { 13 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 14 | } 15 | } 16 | 17 | ################################################################################ 18 | # VPC module call 19 | ################################################################################ 20 | module "vpc" { 21 | source = "clouddrove/vpc/aws" 22 | version = "2.0.0" 23 | 24 | name = "${local.name}-vpc" 25 | environment = local.environment 26 | cidr_block = "10.10.0.0/16" 27 | } 28 | 29 | # ################################################################################ 30 | # # Subnets moudle call 31 | # ################################################################################ 32 | 33 | module "subnets" { 34 | source = "clouddrove/subnet/aws" 35 | version = "2.0.0" 36 | 37 | name = "${local.name}-subnet" 38 | environment = local.environment 39 | nat_gateway_enabled = true 40 | single_nat_gateway = true 41 | availability_zones = ["${local.region}a", "${local.region}b", "${local.region}c"] 42 | vpc_id = module.vpc.vpc_id 43 | type = "public-private" 44 | igw_id = module.vpc.igw_id 45 | cidr_block = local.vpc_cidr_block 46 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 47 | enable_ipv6 = false 48 | 49 | extra_public_tags = { 50 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 51 | "kubernetes.io/role/elb" = "1" 52 | } 53 | 54 | extra_private_tags = { 55 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 56 | "kubernetes.io/role/internal-elb" = "1" 57 | } 58 | 59 | public_inbound_acl_rules = [ 60 | { 61 | rule_number = 100 62 | rule_action = "allow" 63 | from_port = 0 64 | to_port = 0 65 | protocol = "-1" 66 | cidr_block = "0.0.0.0/0" 67 | }, 68 | { 69 | rule_number = 101 70 | rule_action = "allow" 71 | from_port = 0 72 | to_port = 0 73 | protocol = "-1" 74 | ipv6_cidr_block = "::/0" 75 | }, 76 | ] 77 | 78 | public_outbound_acl_rules = [ 79 | { 80 | rule_number = 100 81 | rule_action = "allow" 82 | from_port = 0 83 | to_port = 0 84 | protocol = "-1" 85 | cidr_block = "0.0.0.0/0" 86 | }, 87 | { 88 | rule_number = 101 89 | rule_action = "allow" 90 | from_port = 0 91 | to_port = 0 92 | protocol = "-1" 93 | ipv6_cidr_block = "::/0" 94 | }, 95 | ] 96 | 97 | private_inbound_acl_rules = [ 98 | { 99 | rule_number = 100 100 | rule_action = "allow" 101 | from_port = 0 102 | to_port = 0 103 | protocol = "-1" 104 | cidr_block = "0.0.0.0/0" 105 | }, 106 | { 107 | rule_number = 101 108 | rule_action = "allow" 109 | from_port = 0 110 | 
to_port = 0 111 | protocol = "-1" 112 | ipv6_cidr_block = "::/0" 113 | }, 114 | ] 115 | 116 | private_outbound_acl_rules = [ 117 | { 118 | rule_number = 100 119 | rule_action = "allow" 120 | from_port = 0 121 | to_port = 0 122 | protocol = "-1" 123 | cidr_block = "0.0.0.0/0" 124 | }, 125 | { 126 | rule_number = 101 127 | rule_action = "allow" 128 | from_port = 0 129 | to_port = 0 130 | protocol = "-1" 131 | ipv6_cidr_block = "::/0" 132 | }, 133 | ] 134 | } 135 | 136 | # ################################################################################ 137 | # Security Groups module call 138 | ################################################################################ 139 | 140 | module "ssh" { 141 | source = "clouddrove/security-group/aws" 142 | version = "2.0.0" 143 | 144 | name = "${local.name}-ssh" 145 | environment = local.environment 146 | vpc_id = module.vpc.vpc_id 147 | new_sg_ingress_rules_with_cidr_blocks = [{ 148 | rule_count = 1 149 | from_port = 22 150 | protocol = "tcp" 151 | to_port = 22 152 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 153 | description = "Allow ssh traffic." 154 | }, 155 | { 156 | rule_count = 2 157 | from_port = 27017 158 | protocol = "tcp" 159 | to_port = 27017 160 | cidr_blocks = [local.additional_cidr_block] 161 | description = "Allow Mongodb traffic." 162 | } 163 | ] 164 | 165 | ## EGRESS Rules 166 | new_sg_egress_rules_with_cidr_blocks = [{ 167 | rule_count = 1 168 | from_port = 22 169 | protocol = "tcp" 170 | to_port = 22 171 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 172 | description = "Allow ssh outbound traffic." 173 | }, 174 | { 175 | rule_count = 2 176 | from_port = 27017 177 | protocol = "tcp" 178 | to_port = 27017 179 | cidr_blocks = [local.additional_cidr_block] 180 | description = "Allow Mongodb outbound traffic." 181 | }] 182 | } 183 | 184 | module "http_https" { 185 | source = "clouddrove/security-group/aws" 186 | version = "2.0.0" 187 | 188 | name = "${local.name}-http-https" 189 | environment = local.environment 190 | 191 | vpc_id = module.vpc.vpc_id 192 | ## INGRESS Rules 193 | new_sg_ingress_rules_with_cidr_blocks = [{ 194 | rule_count = 1 195 | from_port = 22 196 | protocol = "tcp" 197 | to_port = 22 198 | cidr_blocks = [local.vpc_cidr_block] 199 | description = "Allow ssh traffic." 200 | }, 201 | { 202 | rule_count = 2 203 | from_port = 80 204 | protocol = "tcp" 205 | to_port = 80 206 | cidr_blocks = [local.vpc_cidr_block] 207 | description = "Allow http traffic." 208 | }, 209 | { 210 | rule_count = 3 211 | from_port = 443 212 | protocol = "tcp" 213 | to_port = 443 214 | cidr_blocks = [local.vpc_cidr_block] 215 | description = "Allow https traffic." 216 | } 217 | ] 218 | 219 | ## EGRESS Rules 220 | new_sg_egress_rules_with_cidr_blocks = [{ 221 | rule_count = 1 222 | from_port = 0 223 | protocol = "-1" 224 | to_port = 0 225 | cidr_blocks = ["0.0.0.0/0"] 226 | ipv6_cidr_blocks = ["::/0"] 227 | description = "Allow all traffic." 
228 | } 229 | ] 230 | } 231 | 232 | ################################################################################ 233 | # KMS Module call 234 | ################################################################################ 235 | module "kms" { 236 | source = "clouddrove/kms/aws" 237 | version = "1.3.0" 238 | 239 | name = "${local.name}-kms" 240 | environment = local.environment 241 | label_order = local.label_order 242 | enabled = true 243 | description = "KMS key for EBS of EKS nodes" 244 | enable_key_rotation = false 245 | policy = data.aws_iam_policy_document.kms.json 246 | } 247 | 248 | data "aws_iam_policy_document" "kms" { 249 | version = "2012-10-17" 250 | statement { 251 | sid = "Enable IAM User Permissions" 252 | effect = "Allow" 253 | principals { 254 | type = "AWS" 255 | identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] 256 | } 257 | actions = ["kms:*"] 258 | resources = ["*"] 259 | } 260 | } 261 | 262 | data "aws_caller_identity" "current" {} 263 | 264 | ################################################################################ 265 | # EKS Module call 266 | ################################################################################ 267 | module "eks" { 268 | source = "../.." 269 | enabled = true 270 | 271 | name = local.name 272 | environment = local.environment 273 | label_order = local.label_order 274 | 275 | # EKS 276 | kubernetes_version = "1.32" 277 | endpoint_public_access = true 278 | # Networking 279 | vpc_id = module.vpc.vpc_id 280 | subnet_ids = module.subnets.private_subnet_id 281 | allowed_security_groups = [module.ssh.security_group_id] 282 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 283 | allowed_cidr_blocks = [local.vpc_cidr_block] 284 | 285 | # AWS Managed Node Group 286 | # Node Groups Defaults Values It will Work all Node Groups 287 | managed_node_group_defaults = { 288 | subnet_ids = module.subnets.private_subnet_id 289 | nodes_additional_security_group_ids = [module.ssh.security_group_id] 290 | tags = { 291 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 292 | "k8s.io/cluster/${module.eks.cluster_name}" = "shared" 293 | } 294 | block_device_mappings = { 295 | xvda = { 296 | device_name = "/dev/xvda" 297 | ebs = { 298 | volume_size = 50 299 | volume_type = "gp3" 300 | iops = 3000 301 | throughput = 150 302 | encrypted = true 303 | kms_key_id = module.kms.key_arn 304 | } 305 | } 306 | } 307 | } 308 | managed_node_group = { 309 | critical = { 310 | name = "${module.eks.cluster_name}-critical" 311 | capacity_type = "ON_DEMAND" 312 | min_size = 1 313 | max_size = 2 314 | desired_size = 2 315 | instance_types = ["t3.medium"] 316 | ami_type = "BOTTLEROCKET_x86_64" 317 | } 318 | 319 | application = { 320 | name = "${module.eks.cluster_name}-application" 321 | capacity_type = "SPOT" 322 | min_size = 1 323 | max_size = 2 324 | desired_size = 1 325 | force_update_version = true 326 | instance_types = ["t3.medium"] 327 | ami_type = "BOTTLEROCKET_x86_64" 328 | } 329 | } 330 | 331 | apply_config_map_aws_auth = true 332 | map_additional_iam_users = [ 333 | { 334 | userarn = "arn:aws:iam::123456789:user/hello@clouddrove.com" 335 | username = "hello@clouddrove.com" 336 | groups = ["system:masters"] 337 | } 338 | ] 339 | } 340 | ## Kubernetes provider configuration 341 | data "aws_eks_cluster" "this" { 342 | depends_on = [module.eks] 343 | name = module.eks.cluster_id 344 | } 345 | 346 | data "aws_eks_cluster_auth" "this" { 347 | depends_on 
= [module.eks] 348 | name = module.eks.cluster_certificate_authority_data 349 | } 350 | 351 | provider "kubernetes" { 352 | host = data.aws_eks_cluster.this.endpoint 353 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 354 | token = data.aws_eks_cluster_auth.this.token 355 | } 356 | -------------------------------------------------------------------------------- /examples/aws_managed/output.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/clouddrove/terraform-aws-eks/0245d6b7aaaa7c499a5aa5440d2d785f594c5f9c/examples/aws_managed/output.tf -------------------------------------------------------------------------------- /examples/aws_managed/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /examples/aws_managed_with_fargate/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | 5 | locals { 6 | name = "clouddrove-eks" 7 | region = "eu-west-1" 8 | vpc_cidr_block = module.vpc.vpc_cidr_block 9 | additional_cidr_block = "172.16.0.0/16" 10 | environment = "test" 11 | label_order = ["name", "environment"] 12 | tags = { 13 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 14 | } 15 | } 16 | 17 | ################################################################################ 18 | # VPC module call 19 | ################################################################################ 20 | module "vpc" { 21 | source = "clouddrove/vpc/aws" 22 | version = "2.0.0" 23 | 24 | name = "${local.name}-vpc" 25 | environment = local.environment 26 | cidr_block = "10.10.0.0/16" 27 | } 28 | 29 | # ################################################################################ 30 | # # Subnets moudle call 31 | # ################################################################################ 32 | 33 | module "subnets" { 34 | source = "clouddrove/subnet/aws" 35 | version = "2.0.0" 36 | 37 | name = "${local.name}-subnet" 38 | environment = local.environment 39 | nat_gateway_enabled = true 40 | single_nat_gateway = true 41 | availability_zones = ["${local.region}a", "${local.region}b", "${local.region}c"] 42 | vpc_id = module.vpc.vpc_id 43 | type = "public-private" 44 | igw_id = module.vpc.igw_id 45 | cidr_block = local.vpc_cidr_block 46 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 47 | enable_ipv6 = false 48 | 49 | extra_public_tags = { 50 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 51 | "kubernetes.io/role/elb" = "1" 52 | } 53 | 54 | extra_private_tags = { 55 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "owned" 56 | "kubernetes.io/role/internal-elb" = "1" 57 | } 58 | 59 | public_inbound_acl_rules = [ 60 | { 61 | rule_number = 100 62 | rule_action = "allow" 63 | from_port = 0 64 | to_port = 0 65 | protocol = "-1" 66 | cidr_block = "0.0.0.0/0" 67 | }, 68 | { 69 | rule_number = 101 70 | rule_action = "allow" 71 | from_port = 0 72 | to_port = 0 73 | protocol = "-1" 74 | ipv6_cidr_block = "::/0" 75 | }, 76 | ] 77 | 78 | public_outbound_acl_rules = [ 79 | { 80 | rule_number = 
100 81 | rule_action = "allow" 82 | from_port = 0 83 | to_port = 0 84 | protocol = "-1" 85 | cidr_block = "0.0.0.0/0" 86 | }, 87 | { 88 | rule_number = 101 89 | rule_action = "allow" 90 | from_port = 0 91 | to_port = 0 92 | protocol = "-1" 93 | ipv6_cidr_block = "::/0" 94 | }, 95 | ] 96 | 97 | private_inbound_acl_rules = [ 98 | { 99 | rule_number = 100 100 | rule_action = "allow" 101 | from_port = 0 102 | to_port = 0 103 | protocol = "-1" 104 | cidr_block = "0.0.0.0/0" 105 | }, 106 | { 107 | rule_number = 101 108 | rule_action = "allow" 109 | from_port = 0 110 | to_port = 0 111 | protocol = "-1" 112 | ipv6_cidr_block = "::/0" 113 | }, 114 | ] 115 | 116 | private_outbound_acl_rules = [ 117 | { 118 | rule_number = 100 119 | rule_action = "allow" 120 | from_port = 0 121 | to_port = 0 122 | protocol = "-1" 123 | cidr_block = "0.0.0.0/0" 124 | }, 125 | { 126 | rule_number = 101 127 | rule_action = "allow" 128 | from_port = 0 129 | to_port = 0 130 | protocol = "-1" 131 | ipv6_cidr_block = "::/0" 132 | }, 133 | ] 134 | } 135 | 136 | 137 | # ################################################################################ 138 | # Security Groups module call 139 | ################################################################################ 140 | 141 | module "ssh" { 142 | source = "clouddrove/security-group/aws" 143 | version = "2.0.0" 144 | 145 | name = "${local.name}-ssh" 146 | environment = local.environment 147 | vpc_id = module.vpc.vpc_id 148 | new_sg_ingress_rules_with_cidr_blocks = [{ 149 | rule_count = 1 150 | from_port = 22 151 | protocol = "tcp" 152 | to_port = 22 153 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 154 | description = "Allow ssh traffic." 155 | }, 156 | { 157 | rule_count = 2 158 | from_port = 27017 159 | protocol = "tcp" 160 | to_port = 27017 161 | cidr_blocks = [local.additional_cidr_block] 162 | description = "Allow Mongodb traffic." 163 | } 164 | ] 165 | 166 | ## EGRESS Rules 167 | new_sg_egress_rules_with_cidr_blocks = [{ 168 | rule_count = 1 169 | from_port = 22 170 | protocol = "tcp" 171 | to_port = 22 172 | cidr_blocks = [local.vpc_cidr_block, local.additional_cidr_block] 173 | description = "Allow ssh outbound traffic." 174 | }, 175 | { 176 | rule_count = 2 177 | from_port = 27017 178 | protocol = "tcp" 179 | to_port = 27017 180 | cidr_blocks = [local.additional_cidr_block] 181 | description = "Allow Mongodb outbound traffic." 182 | }] 183 | } 184 | 185 | module "http_https" { 186 | source = "clouddrove/security-group/aws" 187 | version = "2.0.0" 188 | 189 | name = "${local.name}-http-https" 190 | environment = local.environment 191 | 192 | vpc_id = module.vpc.vpc_id 193 | ## INGRESS Rules 194 | new_sg_ingress_rules_with_cidr_blocks = [{ 195 | rule_count = 1 196 | from_port = 22 197 | protocol = "tcp" 198 | to_port = 22 199 | cidr_blocks = [local.vpc_cidr_block] 200 | description = "Allow ssh traffic." 201 | }, 202 | { 203 | rule_count = 2 204 | from_port = 80 205 | protocol = "tcp" 206 | to_port = 80 207 | cidr_blocks = [local.vpc_cidr_block] 208 | description = "Allow http traffic." 209 | }, 210 | { 211 | rule_count = 3 212 | from_port = 443 213 | protocol = "tcp" 214 | to_port = 443 215 | cidr_blocks = [local.vpc_cidr_block] 216 | description = "Allow https traffic." 
217 | } 218 | ] 219 | 220 | ## EGRESS Rules 221 | new_sg_egress_rules_with_cidr_blocks = [{ 222 | rule_count = 1 223 | from_port = 0 224 | protocol = "-1" 225 | to_port = 0 226 | cidr_blocks = ["0.0.0.0/0"] 227 | ipv6_cidr_blocks = ["::/0"] 228 | description = "Allow all traffic." 229 | } 230 | ] 231 | } 232 | 233 | ################################################################################ 234 | # KMS Module call 235 | ################################################################################ 236 | module "kms" { 237 | source = "clouddrove/kms/aws" 238 | version = "1.3.0" 239 | 240 | name = "${local.name}-kms" 241 | environment = local.environment 242 | label_order = local.label_order 243 | enabled = true 244 | description = "KMS key for EBS of EKS nodes" 245 | enable_key_rotation = true 246 | policy = data.aws_iam_policy_document.kms.json 247 | } 248 | 249 | data "aws_iam_policy_document" "kms" { 250 | version = "2012-10-17" 251 | statement { 252 | sid = "Enable IAM User Permissions" 253 | effect = "Allow" 254 | principals { 255 | type = "AWS" 256 | identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] 257 | } 258 | actions = ["kms:*"] 259 | resources = ["*"] 260 | } 261 | } 262 | 263 | data "aws_caller_identity" "current" {} 264 | 265 | ################################################################################ 266 | # EKS Module call 267 | ################################################################################ 268 | module "eks" { 269 | source = "../.." 270 | enabled = true 271 | 272 | name = local.name 273 | environment = local.environment 274 | label_order = local.label_order 275 | 276 | # EKS 277 | kubernetes_version = "1.32" 278 | endpoint_public_access = true 279 | # Networking 280 | vpc_id = module.vpc.vpc_id 281 | subnet_ids = module.subnets.private_subnet_id 282 | allowed_security_groups = [module.ssh.security_group_id] 283 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 284 | allowed_cidr_blocks = [local.vpc_cidr_block] 285 | 286 | # AWS Managed Node Group 287 | # Node Groups Defaults Values It will Work all Node Groups 288 | managed_node_group_defaults = { 289 | subnet_ids = module.subnets.private_subnet_id 290 | nodes_additional_security_group_ids = [module.ssh.security_group_id] 291 | tags = { 292 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 293 | "k8s.io/cluster/${module.eks.cluster_name}" = "shared" 294 | } 295 | block_device_mappings = { 296 | xvda = { 297 | device_name = "/dev/xvda" 298 | ebs = { 299 | volume_size = 50 300 | volume_type = "gp3" 301 | iops = 3000 302 | throughput = 150 303 | encrypted = true 304 | kms_key_id = module.kms.key_arn 305 | } 306 | } 307 | } 308 | } 309 | managed_node_group = { 310 | critical = { 311 | name = "${module.eks.cluster_name}-critical" 312 | capacity_type = "SPOT" 313 | min_size = 1 314 | max_size = 2 315 | desired_size = 2 316 | instance_types = ["t3.medium"] 317 | ami_type = "BOTTLEROCKET_x86_64" 318 | } 319 | 320 | application = { 321 | name = "${module.eks.cluster_name}-application" 322 | capacity_type = "SPOT" 323 | min_size = 1 324 | max_size = 2 325 | desired_size = 1 326 | force_update_version = true 327 | instance_types = ["t3.medium"] 328 | ami_type = "BOTTLEROCKET_x86_64" 329 | } 330 | } 331 | # -- Set this to `true` only when you have correct iam_user details. 
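# -- Each `map_additional_iam_users` entry (userarn, username, groups) is rendered into the
# -- aws-auth ConfigMap; the account ID in the user ARN below is a placeholder.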
332 | apply_config_map_aws_auth = true 333 | map_additional_iam_users = [ 334 | { 335 | userarn = "arn:aws:iam::123456789:user/hello@clouddrove.com" 336 | username = "hello@clouddrove.com" 337 | groups = ["system:masters"] 338 | } 339 | ] 340 | #fargate profile 341 | fargate_enabled = true 342 | fargate_profiles = { 343 | profile-0 = { 344 | addon_name = "0" 345 | namespace = "default" 346 | } 347 | } 348 | } 349 | ## Kubernetes provider configuration 350 | data "aws_eks_cluster" "this" { 351 | depends_on = [module.eks] 352 | name = module.eks.cluster_id 353 | } 354 | 355 | data "aws_eks_cluster_auth" "this" { 356 | depends_on = [module.eks] 357 | name = module.eks.cluster_certificate_authority_data 358 | } 359 | 360 | provider "kubernetes" { 361 | host = data.aws_eks_cluster.this.endpoint 362 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 363 | token = data.aws_eks_cluster_auth.this.token 364 | } 365 | -------------------------------------------------------------------------------- /examples/aws_managed_with_fargate/output.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/clouddrove/terraform-aws-eks/0245d6b7aaaa7c499a5aa5440d2d785f594c5f9c/examples/aws_managed_with_fargate/output.tf -------------------------------------------------------------------------------- /examples/aws_managed_with_fargate/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /examples/complete/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | locals { 5 | name = "clouddrove-eks" 6 | region = "eu-west-1" 7 | vpc_cidr_block = module.vpc.vpc_cidr_block 8 | additional_cidr_block = "172.16.0.0/16" 9 | environment = "test" 10 | label_order = ["name", "environment"] 11 | tags = { 12 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 13 | } 14 | } 15 | 16 | ################################################################################ 17 | # VPC module call 18 | ################################################################################ 19 | module "vpc" { 20 | source = "clouddrove/vpc/aws" 21 | version = "2.0.0" 22 | 23 | name = "${local.name}-vpc" 24 | environment = local.environment 25 | cidr_block = "10.10.0.0/16" 26 | } 27 | 28 | ################################################################################ 29 | # Subnets module call 30 | ################################################################################ 31 | module "subnets" { 32 | source = "clouddrove/subnet/aws" 33 | version = "2.0.0" 34 | 35 | name = "${local.name}-subnets" 36 | environment = local.environment 37 | 38 | nat_gateway_enabled = true 39 | availability_zones = ["${local.region}a", "${local.region}b"] 40 | vpc_id = module.vpc.vpc_id 41 | cidr_block = module.vpc.vpc_cidr_block 42 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 43 | type = "public-private" 44 | igw_id = module.vpc.igw_id 45 | 46 | extra_public_tags = { 47 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 48 | "kubernetes.io/role/elb" 
= "1" 49 | } 50 | 51 | extra_private_tags = { 52 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 53 | "kubernetes.io/role/internal-elb" = "1" 54 | } 55 | 56 | public_inbound_acl_rules = [ 57 | { 58 | rule_number = 100 59 | rule_action = "allow" 60 | from_port = 0 61 | to_port = 0 62 | protocol = "-1" 63 | cidr_block = "0.0.0.0/0" 64 | }, 65 | { 66 | rule_number = 101 67 | rule_action = "allow" 68 | from_port = 0 69 | to_port = 0 70 | protocol = "-1" 71 | ipv6_cidr_block = "::/0" 72 | }, 73 | ] 74 | 75 | public_outbound_acl_rules = [ 76 | { 77 | rule_number = 100 78 | rule_action = "allow" 79 | from_port = 0 80 | to_port = 0 81 | protocol = "-1" 82 | cidr_block = "0.0.0.0/0" 83 | }, 84 | { 85 | rule_number = 101 86 | rule_action = "allow" 87 | from_port = 0 88 | to_port = 0 89 | protocol = "-1" 90 | ipv6_cidr_block = "::/0" 91 | }, 92 | ] 93 | 94 | private_inbound_acl_rules = [ 95 | { 96 | rule_number = 100 97 | rule_action = "allow" 98 | from_port = 0 99 | to_port = 0 100 | protocol = "-1" 101 | cidr_block = "0.0.0.0/0" 102 | }, 103 | { 104 | rule_number = 101 105 | rule_action = "allow" 106 | from_port = 0 107 | to_port = 0 108 | protocol = "-1" 109 | ipv6_cidr_block = "::/0" 110 | }, 111 | ] 112 | 113 | private_outbound_acl_rules = [ 114 | { 115 | rule_number = 100 116 | rule_action = "allow" 117 | from_port = 0 118 | to_port = 0 119 | protocol = "-1" 120 | cidr_block = "0.0.0.0/0" 121 | }, 122 | { 123 | rule_number = 101 124 | rule_action = "allow" 125 | from_port = 0 126 | to_port = 0 127 | protocol = "-1" 128 | ipv6_cidr_block = "::/0" 129 | }, 130 | ] 131 | } 132 | 133 | 134 | # ################################################################################ 135 | # Security Groups module call 136 | ################################################################################ 137 | module "ssh" { 138 | source = "clouddrove/security-group/aws" 139 | version = "2.0.0" 140 | 141 | name = "${local.name}-ssh" 142 | environment = local.environment 143 | 144 | vpc_id = module.vpc.vpc_id 145 | new_sg_ingress_rules_with_cidr_blocks = [{ 146 | rule_count = 1 147 | from_port = 22 148 | protocol = "tcp" 149 | to_port = 22 150 | cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 151 | description = "Allow ssh traffic." 152 | }, 153 | { 154 | rule_count = 2 155 | from_port = 27017 156 | protocol = "tcp" 157 | to_port = 27017 158 | cidr_blocks = [local.additional_cidr_block] 159 | description = "Allow Mongodb traffic." 160 | } 161 | ] 162 | 163 | ## EGRESS Rules 164 | new_sg_egress_rules_with_cidr_blocks = [{ 165 | rule_count = 1 166 | from_port = 22 167 | protocol = "tcp" 168 | to_port = 22 169 | cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 170 | description = "Allow ssh outbound traffic." 171 | }, 172 | { 173 | rule_count = 2 174 | from_port = 27017 175 | protocol = "tcp" 176 | to_port = 27017 177 | cidr_blocks = [local.additional_cidr_block] 178 | description = "Allow Mongodb outbound traffic." 179 | }] 180 | } 181 | 182 | module "http_https" { 183 | source = "clouddrove/security-group/aws" 184 | version = "2.0.0" 185 | 186 | name = "${local.name}-http-https" 187 | environment = local.environment 188 | 189 | vpc_id = module.vpc.vpc_id 190 | ## INGRESS Rules 191 | new_sg_ingress_rules_with_cidr_blocks = [{ 192 | rule_count = 1 193 | from_port = 22 194 | protocol = "tcp" 195 | to_port = 22 196 | cidr_blocks = [module.vpc.vpc_cidr_block] 197 | description = "Allow ssh traffic." 
198 | }, 199 | { 200 | rule_count = 2 201 | from_port = 80 202 | protocol = "tcp" 203 | to_port = 80 204 | cidr_blocks = [module.vpc.vpc_cidr_block] 205 | description = "Allow http traffic." 206 | }, 207 | { 208 | rule_count = 3 209 | from_port = 443 210 | protocol = "tcp" 211 | to_port = 443 212 | cidr_blocks = [module.vpc.vpc_cidr_block] 213 | description = "Allow https traffic." 214 | } 215 | ] 216 | 217 | ## EGRESS Rules 218 | new_sg_egress_rules_with_cidr_blocks = [{ 219 | rule_count = 1 220 | from_port = 0 221 | protocol = "-1" 222 | to_port = 0 223 | cidr_blocks = ["0.0.0.0/0"] 224 | ipv6_cidr_blocks = ["::/0"] 225 | description = "Allow all traffic." 226 | } 227 | ] 228 | } 229 | 230 | ################################################################################ 231 | # KMS Module call 232 | ################################################################################ 233 | module "kms" { 234 | source = "clouddrove/kms/aws" 235 | version = "1.3.0" 236 | 237 | name = "${local.name}-kms" 238 | environment = local.environment 239 | label_order = local.label_order 240 | enabled = true 241 | description = "KMS key for EBS of EKS nodes" 242 | enable_key_rotation = false 243 | policy = data.aws_iam_policy_document.kms.json 244 | } 245 | 246 | data "aws_iam_policy_document" "kms" { 247 | version = "2012-10-17" 248 | statement { 249 | sid = "Enable IAM User Permissions" 250 | effect = "Allow" 251 | principals { 252 | type = "AWS" 253 | identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] 254 | } 255 | actions = ["kms:*"] 256 | resources = ["*"] 257 | } 258 | } 259 | 260 | data "aws_caller_identity" "current" {} 261 | 262 | ################################################################################ 263 | # EKS Module call 264 | ################################################################################ 265 | module "eks" { 266 | source = "../.." 
267 | 268 | name = local.name 269 | environment = local.environment 270 | enabled = true 271 | 272 | kubernetes_version = "1.32" 273 | endpoint_private_access = true 274 | endpoint_public_access = true 275 | 276 | # Networking 277 | vpc_id = module.vpc.vpc_id 278 | subnet_ids = module.subnets.private_subnet_id 279 | allowed_security_groups = [module.ssh.security_group_id] 280 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 281 | allowed_cidr_blocks = [local.vpc_cidr_block] 282 | 283 | # Self Managed Node Group 284 | # Node Groups Defaults Values It will Work all Node Groups 285 | self_node_group_defaults = { 286 | subnet_ids = module.subnets.private_subnet_id 287 | propagate_tags = [{ 288 | key = "aws-node-termination-handler/managed" 289 | value = true 290 | propagate_at_launch = true 291 | }, 292 | { 293 | key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" 294 | value = "owned" 295 | propagate_at_launch = true 296 | } 297 | ] 298 | block_device_mappings = { 299 | xvda = { 300 | device_name = "/dev/xvda" 301 | ebs = { 302 | volume_size = 50 303 | volume_type = "gp3" 304 | iops = 3000 305 | throughput = 150 306 | encrypted = true 307 | kms_key_id = module.kms.key_arn 308 | } 309 | } 310 | } 311 | } 312 | self_node_groups = { 313 | self_managed_critical = { 314 | name = "self_managed_critical" 315 | min_size = 1 316 | max_size = 2 317 | desired_size = 1 318 | bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" 319 | instance_type = "t3.medium" 320 | } 321 | self_managed_application = { 322 | name = "self_managed_application" 323 | instance_market_options = { 324 | market_type = "spot" 325 | } 326 | min_size = 1 327 | max_size = 2 328 | desired_size = 1 329 | bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" 330 | instance_type = "t3.medium" 331 | } 332 | # Schdule EKS Managed Auto Scaling node group. Change start_time and end_time. 
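# Each entry below is intended to become a scheduled scaling action on the node group's
# Auto Scaling group: `recurrence` is a cron expression evaluated in `timezone`, while
# `start_time`/`end_time` bound when the schedule applies (the dates shown are samples).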
333 | schedules = { 334 | scale-up = { 335 | min_size = 2 336 | max_size = 2 # Retains current max size 337 | desired_size = 2 338 | start_time = "2023-09-15T19:00:00Z" 339 | end_time = "2023-09-19T19:00:00Z" 340 | timezone = "Europe/Amsterdam" 341 | recurrence = "0 7 * * 1" 342 | }, 343 | scale-down = { 344 | min_size = 0 345 | max_size = 0 # Retains current max size 346 | desired_size = 0 347 | start_time = "2023-09-12T12:00:00Z" 348 | end_time = "2024-03-05T12:00:00Z" 349 | timezone = "Europe/Amsterdam" 350 | recurrence = "0 7 * * 5" 351 | } 352 | } 353 | 354 | } 355 | # AWS Managed Node Group 356 | # Node Groups Defaults Values It will Work all Node Groups 357 | managed_node_group_defaults = { 358 | subnet_ids = module.subnets.private_subnet_id 359 | nodes_additional_security_group_ids = [module.ssh.security_group_id] 360 | tags = { 361 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 362 | "k8s.io/cluster/${module.eks.cluster_name}" = "shared" 363 | } 364 | block_device_mappings = { 365 | xvda = { 366 | device_name = "/dev/xvda" 367 | ebs = { 368 | volume_size = 50 369 | volume_type = "gp3" 370 | iops = 3000 371 | throughput = 150 372 | encrypted = true 373 | kms_key_id = module.kms.key_arn 374 | } 375 | } 376 | } 377 | } 378 | managed_node_group = { 379 | critical = { 380 | name = "${module.eks.cluster_name}-critical" 381 | min_size = 1 382 | max_size = 2 383 | desired_size = 1 384 | instance_types = ["t3.medium"] 385 | ami_type = "BOTTLEROCKET_x86_64" 386 | } 387 | application = { 388 | name = "${module.eks.cluster_name}-application" 389 | capacity_type = "SPOT" 390 | 391 | min_size = 1 392 | max_size = 2 393 | desired_size = 1 394 | force_update_version = true 395 | instance_types = ["t3.medium"] 396 | ami_type = "BOTTLEROCKET_x86_64" 397 | } 398 | } 399 | apply_config_map_aws_auth = true 400 | map_additional_iam_users = [ 401 | { 402 | userarn = "arn:aws:iam::123456789:user/hello@clouddrove.com" 403 | username = "hello@clouddrove.com" 404 | groups = ["system:masters"] 405 | } 406 | ] 407 | # Schdule EKS Managed Auto Scaling node group 408 | schedules = { 409 | scale-up = { 410 | min_size = 2 411 | max_size = 2 # Retains current max size 412 | desired_size = 2 413 | start_time = "2023-09-15T19:00:00Z" 414 | end_time = "2023-09-19T19:00:00Z" 415 | timezone = "Europe/Amsterdam" 416 | recurrence = "0 7 * * 1" 417 | }, 418 | scale-down = { 419 | min_size = 0 420 | max_size = 0 # Retains current max size 421 | desired_size = 0 422 | start_time = "2023-09-12T12:00:00Z" 423 | end_time = "2024-03-05T12:00:00Z" 424 | timezone = "Europe/Amsterdam" 425 | recurrence = "0 7 * * 5" 426 | } 427 | } 428 | } 429 | 430 | # Kubernetes provider configuration 431 | data "aws_eks_cluster" "this" { 432 | name = module.eks.cluster_id 433 | } 434 | 435 | data "aws_eks_cluster_auth" "this" { 436 | name = module.eks.cluster_certificate_authority_data 437 | } 438 | provider "kubernetes" { 439 | host = data.aws_eks_cluster.this.endpoint 440 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 441 | token = data.aws_eks_cluster_auth.this.token 442 | } -------------------------------------------------------------------------------- /examples/complete/output.tf: -------------------------------------------------------------------------------- 1 | output "eks_name" { 2 | value = module.eks.cluster_id 3 | } 4 | 5 | output "node_iam_role_name" { 6 | value = module.eks.node_group_iam_role_name 7 | } 8 | 9 | output "tags" { 10 | value = module.eks.tags 11 | } 
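Because the module exposes `oidc_provider_arn` and `cluster_oidc_issuer_url` (see the outputs table above), IAM roles for service accounts (IRSA) can be layered on top of the complete example. The sketch below is illustrative only: the namespace, service-account name, and role name are assumptions, not resources defined in this repository.

data "aws_iam_policy_document" "irsa_assume" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRoleWithWebIdentity"]

    principals {
      type        = "Federated"
      identifiers = [module.eks.oidc_provider_arn]
    }

    # Limit the role to a single (hypothetical) service account.
    condition {
      test     = "StringEquals"
      variable = "${replace(module.eks.cluster_oidc_issuer_url, "https://", "")}:sub"
      values   = ["system:serviceaccount:kube-system:example-irsa"]
    }
  }
}

resource "aws_iam_role" "irsa_example" {
  name               = "${local.name}-irsa-example"
  assume_role_policy = data.aws_iam_policy_document.irsa_assume.json
}

A service account annotated with `eks.amazonaws.com/role-arn` pointing at this role can then assume it without relying on node-level credentials.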
-------------------------------------------------------------------------------- /examples/complete/versions.tf: -------------------------------------------------------------------------------- 1 | # Terraform version 2 | terraform { 3 | required_version = ">= 1.5.4" 4 | 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 5.11.0" 9 | } 10 | cloudinit = { 11 | source = "hashicorp/cloudinit" 12 | version = ">= 2.0" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /examples/self_managed/example.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | locals { 5 | name = "clouddrove-eks" 6 | region = "eu-west-1" 7 | vpc_cidr_block = module.vpc.vpc_cidr_block 8 | additional_cidr_block = "172.16.0.0/16" 9 | environment = "test" 10 | label_order = ["name", "environment"] 11 | tags = { 12 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 13 | } 14 | } 15 | 16 | ################################################################################ 17 | # VPC module call 18 | ################################################################################ 19 | module "vpc" { 20 | source = "clouddrove/vpc/aws" 21 | version = "2.0.0" 22 | 23 | name = "${local.name}-vpc" 24 | environment = local.environment 25 | cidr_block = "10.10.0.0/16" 26 | } 27 | 28 | ################################################################################ 29 | # Subnets 30 | ################################################################################ 31 | module "subnets" { 32 | source = "clouddrove/subnet/aws" 33 | version = "2.0.0" 34 | 35 | name = "${local.name}-subnets" 36 | environment = local.environment 37 | nat_gateway_enabled = true 38 | availability_zones = ["${local.region}a", "${local.region}b"] 39 | vpc_id = module.vpc.vpc_id 40 | cidr_block = module.vpc.vpc_cidr_block 41 | ipv6_cidr_block = module.vpc.ipv6_cidr_block 42 | type = "public-private" 43 | igw_id = module.vpc.igw_id 44 | 45 | extra_public_tags = { 46 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 47 | "kubernetes.io/role/elb" = "1" 48 | } 49 | 50 | extra_private_tags = { 51 | "kubernetes.io/cluster/${module.eks.cluster_name}" = "shared" 52 | "kubernetes.io/role/internal-elb" = "1" 53 | } 54 | 55 | public_inbound_acl_rules = [ 56 | { 57 | rule_number = 100 58 | rule_action = "allow" 59 | from_port = 0 60 | to_port = 0 61 | protocol = "-1" 62 | cidr_block = "0.0.0.0/0" 63 | }, 64 | { 65 | rule_number = 101 66 | rule_action = "allow" 67 | from_port = 0 68 | to_port = 0 69 | protocol = "-1" 70 | ipv6_cidr_block = "::/0" 71 | }, 72 | ] 73 | public_outbound_acl_rules = [ 74 | { 75 | rule_number = 100 76 | rule_action = "allow" 77 | from_port = 0 78 | to_port = 0 79 | protocol = "-1" 80 | cidr_block = "0.0.0.0/0" 81 | }, 82 | { 83 | rule_number = 101 84 | rule_action = "allow" 85 | from_port = 0 86 | to_port = 0 87 | protocol = "-1" 88 | ipv6_cidr_block = "::/0" 89 | }, 90 | ] 91 | private_inbound_acl_rules = [ 92 | { 93 | rule_number = 100 94 | rule_action = "allow" 95 | from_port = 0 96 | to_port = 0 97 | protocol = "-1" 98 | cidr_block = "0.0.0.0/0" 99 | }, 100 | { 101 | rule_number = 101 102 | rule_action = "allow" 103 | from_port = 0 104 | to_port = 0 105 | protocol = "-1" 106 | ipv6_cidr_block = "::/0" 107 | }, 108 | ] 109 | private_outbound_acl_rules = [ 110 | { 111 | rule_number = 100 112 | rule_action = "allow" 113 | from_port = 0 114 | to_port = 0 
115 | protocol = "-1" 116 | cidr_block = "0.0.0.0/0" 117 | }, 118 | { 119 | rule_number = 101 120 | rule_action = "allow" 121 | from_port = 0 122 | to_port = 0 123 | protocol = "-1" 124 | ipv6_cidr_block = "::/0" 125 | }, 126 | ] 127 | } 128 | 129 | # ################################################################################ 130 | # Security Groups 131 | ################################################################################ 132 | 133 | module "ssh" { 134 | source = "clouddrove/security-group/aws" 135 | version = "2.0.0" 136 | 137 | name = "${local.name}-ssh" 138 | environment = local.environment 139 | vpc_id = module.vpc.vpc_id 140 | new_sg_ingress_rules_with_cidr_blocks = [{ 141 | rule_count = 1 142 | from_port = 22 143 | protocol = "tcp" 144 | to_port = 22 145 | cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 146 | description = "Allow ssh traffic." 147 | }, 148 | { 149 | rule_count = 2 150 | from_port = 27017 151 | protocol = "tcp" 152 | to_port = 27017 153 | cidr_blocks = [local.additional_cidr_block] 154 | description = "Allow Mongodb traffic." 155 | } 156 | ] 157 | ## EGRESS Rules 158 | new_sg_egress_rules_with_cidr_blocks = [{ 159 | rule_count = 1 160 | from_port = 22 161 | protocol = "tcp" 162 | to_port = 22 163 | cidr_blocks = [module.vpc.vpc_cidr_block, local.additional_cidr_block] 164 | description = "Allow ssh outbound traffic." 165 | }, 166 | { 167 | rule_count = 2 168 | from_port = 27017 169 | protocol = "tcp" 170 | to_port = 27017 171 | cidr_blocks = [local.additional_cidr_block] 172 | description = "Allow Mongodb outbound traffic." 173 | }] 174 | } 175 | 176 | module "http_https" { 177 | source = "clouddrove/security-group/aws" 178 | version = "2.0.0" 179 | 180 | name = "${local.name}-http-https" 181 | environment = local.environment 182 | vpc_id = module.vpc.vpc_id 183 | ## INGRESS Rules 184 | new_sg_ingress_rules_with_cidr_blocks = [{ 185 | rule_count = 1 186 | from_port = 22 187 | protocol = "tcp" 188 | to_port = 22 189 | cidr_blocks = [module.vpc.vpc_cidr_block] 190 | description = "Allow ssh traffic." 191 | }, 192 | { 193 | rule_count = 2 194 | from_port = 80 195 | protocol = "http" 196 | to_port = 80 197 | cidr_blocks = [module.vpc.vpc_cidr_block] 198 | description = "Allow http traffic." 199 | }, 200 | { 201 | rule_count = 3 202 | from_port = 443 203 | protocol = "https" 204 | to_port = 443 205 | cidr_blocks = [module.vpc.vpc_cidr_block] 206 | description = "Allow https traffic." 207 | } 208 | ] 209 | 210 | ## EGRESS Rules 211 | new_sg_egress_rules_with_cidr_blocks = [{ 212 | rule_count = 1 213 | from_port = 0 214 | protocol = "-1" 215 | to_port = 0 216 | cidr_blocks = ["0.0.0.0/0"] 217 | ipv6_cidr_blocks = ["::/0"] 218 | description = "Allow all traffic." 219 | } 220 | ] 221 | } 222 | 223 | ################################################################################ 224 | # EKS Module call 225 | ################################################################################ 226 | module "eks" { 227 | source = "../.." 
228 | 229 | name = local.name 230 | environment = "test" 231 | 232 | # EKS 233 | kubernetes_version = "1.32" 234 | endpoint_private_access = true 235 | endpoint_public_access = true 236 | # Networking 237 | vpc_id = module.vpc.vpc_id 238 | subnet_ids = module.subnets.private_subnet_id 239 | allowed_security_groups = [module.ssh.security_group_id] 240 | eks_additional_security_group_ids = ["${module.ssh.security_group_id}", "${module.http_https.security_group_id}"] 241 | allowed_cidr_blocks = [local.vpc_cidr_block] 242 | 243 | # Self Managed Node Grou 244 | # Node Groups Defaults Values It will Work all Node Groups 245 | self_node_group_defaults = { 246 | subnet_ids = module.subnets.private_subnet_id 247 | propagate_tags = [{ 248 | key = "aws-node-termination-handler/managed" 249 | value = true 250 | propagate_at_launch = true 251 | }, 252 | { 253 | key = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}" 254 | value = "owned" 255 | propagate_at_launch = true 256 | 257 | } 258 | ] 259 | 260 | block_device_mappings = { 261 | xvda = { 262 | device_name = "/dev/xvda" 263 | ebs = { 264 | volume_size = 50 265 | volume_type = "gp3" 266 | iops = 3000 267 | throughput = 150 268 | } 269 | } 270 | } 271 | } 272 | 273 | self_node_groups = { 274 | critical = { 275 | name = "${module.eks.cluster_name}-critical" 276 | min_size = 1 277 | max_size = 7 278 | desired_size = 1 279 | bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'" 280 | instance_type = "t3.medium" 281 | } 282 | application = { 283 | name = "${module.eks.cluster_name}-application" 284 | instance_market_options = { 285 | market_type = "spot" 286 | } 287 | min_size = 1 288 | max_size = 7 289 | desired_size = 1 290 | bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" 291 | instance_type = "t3.medium" 292 | } 293 | } 294 | # Schdule Self Managed Auto Scaling node group 295 | schedules = { 296 | scale-up = { 297 | min_size = 2 298 | max_size = 2 # Retains current max size 299 | desired_size = 2 300 | start_time = "2023-08-15T19:00:00Z" 301 | end_time = "2023-08-19T19:00:00Z" 302 | timezone = "Europe/Amsterdam" 303 | recurrence = "0 7 * * 1" 304 | }, 305 | scale-down = { 306 | min_size = 0 307 | max_size = 0 # Retains current max size 308 | desired_size = 0 309 | start_time = "2023-08-12T12:00:00Z" 310 | end_time = "2024-03-05T12:00:00Z" 311 | timezone = "Europe/Amsterdam" 312 | recurrence = "0 7 * * 5" 313 | } 314 | } 315 | } 316 | # Kubernetes provider configuration 317 | data "aws_eks_cluster" "this" { 318 | name = module.eks.cluster_id 319 | } 320 | 321 | data "aws_eks_cluster_auth" "this" { 322 | name = module.eks.cluster_certificate_authority_data 323 | } 324 | # 325 | provider "kubernetes" { 326 | host = data.aws_eks_cluster.this.endpoint 327 | cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data) 328 | token = data.aws_eks_cluster_auth.this.token 329 | } -------------------------------------------------------------------------------- /examples/self_managed/output.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/clouddrove/terraform-aws-eks/0245d6b7aaaa7c499a5aa5440d2d785f594c5f9c/examples/self_managed/output.tf -------------------------------------------------------------------------------- /examples/self_managed/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 
1.5.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.11.0" 8 | } 9 | cloudinit = { 10 | source = "hashicorp/cloudinit" 11 | version = ">= 2.0" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /fargate_profile.tf: -------------------------------------------------------------------------------- 1 | module "fargate" { 2 | source = "./node_group/fargate_profile" 3 | 4 | name = var.name 5 | environment = var.environment 6 | label_order = var.label_order 7 | enabled = var.enabled 8 | fargate_enabled = var.fargate_enabled 9 | cluster_name = aws_eks_cluster.default[0].name 10 | fargate_profiles = var.fargate_profiles 11 | subnet_ids = var.subnet_ids 12 | 13 | } -------------------------------------------------------------------------------- /iam.tf: -------------------------------------------------------------------------------- 1 | 2 | data "aws_iam_policy_document" "assume_role" { 3 | count = var.enabled ? 1 : 0 4 | 5 | statement { 6 | effect = "Allow" 7 | actions = ["sts:AssumeRole"] 8 | 9 | principals { 10 | type = "Service" 11 | identifiers = ["eks.amazonaws.com"] 12 | } 13 | } 14 | } 15 | 16 | resource "aws_iam_role" "default" { 17 | count = var.enabled ? 1 : 0 18 | 19 | name = module.labels.id 20 | assume_role_policy = data.aws_iam_policy_document.assume_role[0].json 21 | permissions_boundary = var.permissions_boundary 22 | 23 | tags = module.labels.tags 24 | } 25 | 26 | resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" { 27 | count = var.enabled ? 1 : 0 28 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", data.aws_partition.current.partition) 29 | role = aws_iam_role.default[0].name 30 | } 31 | 32 | resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" { 33 | count = var.enabled ? 1 : 0 34 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", data.aws_partition.current.partition) 35 | role = aws_iam_role.default[0].name 36 | } 37 | 38 | data "aws_iam_policy_document" "service_role" { 39 | count = var.enabled ? 1 : 0 40 | 41 | statement { 42 | effect = "Allow" 43 | actions = [ 44 | "ec2:DescribeInternetGateways", 45 | "elasticloadbalancing:SetIpAddressType", 46 | "elasticloadbalancing:SetSubnets", 47 | "ec2:DescribeAccountAttributes", 48 | "ec2:DescribeAddresses", 49 | ] 50 | resources = ["*"] 51 | } 52 | } 53 | 54 | resource "aws_iam_role_policy" "service_role" { 55 | count = var.enabled ? 1 : 0 56 | role = aws_iam_role.default[0].name 57 | policy = data.aws_iam_policy_document.service_role[0].json 58 | 59 | name = module.labels.id 60 | 61 | } 62 | 63 | 64 | #-------------------------------------------------------IAM FOR node Group---------------------------------------------- 65 | 66 | #Module : IAM ROLE 67 | #Description : Provides an IAM role. 68 | resource "aws_iam_role" "node_groups" { 69 | count = var.enabled ? 1 : 0 70 | name = "${module.labels.id}-node_group" 71 | assume_role_policy = data.aws_iam_policy_document.node_group[0].json 72 | tags = module.labels.tags 73 | } 74 | 75 | #Module : IAM ROLE POLICY ATTACHMENT CNI 76 | #Description : Attaches a Managed IAM Policy to an IAM role. 77 | resource "aws_iam_role_policy_attachment" "amazon_eks_cni_policy" { 78 | count = var.enabled ? 
1 : 0 79 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" 80 | role = aws_iam_role.node_groups[0].name 81 | } 82 | 83 | resource "aws_iam_role_policy_attachment" "additional" { 84 | for_each = { for k, v in var.iam_role_additional_policies : k => v if var.enabled } 85 | 86 | policy_arn = each.value 87 | role = aws_iam_role.node_groups[0].name 88 | } 89 | 90 | #Module : IAM ROLE POLICY ATTACHMENT EC2 CONTAINER REGISTRY READ ONLY 91 | #Description : Attaches a Managed IAM Policy to an IAM role. 92 | resource "aws_iam_role_policy_attachment" "amazon_ec2_container_registry_read_only" { 93 | count = var.enabled ? 1 : 0 94 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" 95 | role = aws_iam_role.node_groups[0].name 96 | } 97 | 98 | resource "aws_iam_policy" "amazon_eks_node_group_autoscaler_policy" { 99 | count = var.enabled ? 1 : 0 100 | name = format("%s-node-group-policy", module.labels.id) 101 | policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy[0].json 102 | } 103 | 104 | resource "aws_iam_role_policy_attachment" "amazon_eks_node_group_autoscaler_policy" { 105 | count = var.enabled ? 1 : 0 106 | policy_arn = aws_iam_policy.amazon_eks_node_group_autoscaler_policy[0].arn 107 | role = aws_iam_role.node_groups[0].name 108 | } 109 | 110 | resource "aws_iam_policy" "amazon_eks_worker_node_autoscaler_policy" { 111 | count = var.enabled ? 1 : 0 112 | name = "${module.labels.id}-autoscaler" 113 | path = "/" 114 | policy = data.aws_iam_policy_document.amazon_eks_node_group_autoscaler_policy[0].json 115 | } 116 | 117 | resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_autoscaler_policy" { 118 | count = var.enabled ? 1 : 0 119 | policy_arn = aws_iam_policy.amazon_eks_worker_node_autoscaler_policy[0].arn 120 | role = aws_iam_role.node_groups[0].name 121 | } 122 | 123 | resource "aws_iam_role_policy_attachment" "amazon_eks_worker_node_policy" { 124 | count = var.enabled ? 1 : 0 125 | policy_arn = format("%s/%s", local.aws_policy_prefix, "AmazonEKSWorkerNodePolicy") 126 | role = aws_iam_role.node_groups[0].name 127 | } 128 | 129 | data "aws_iam_policy_document" "node_group" { 130 | count = var.enabled ? 1 : 0 131 | 132 | statement { 133 | effect = "Allow" 134 | actions = ["sts:AssumeRole"] 135 | 136 | principals { 137 | type = "Service" 138 | identifiers = ["ec2.amazonaws.com"] 139 | } 140 | } 141 | } 142 | 143 | # Autoscaler policy for node group 144 | data "aws_iam_policy_document" "amazon_eks_node_group_autoscaler_policy" { 145 | count = var.enabled ? 1 : 0 146 | 147 | statement { 148 | effect = "Allow" 149 | actions = [ 150 | "autoscaling:DescribeAutoScalingGroups", 151 | "autoscaling:DescribeAutoScalingInstances", 152 | "autoscaling:DescribeLaunchConfigurations", 153 | "autoscaling:DescribeTags", 154 | "autoscaling:SetDesiredCapacity", 155 | "autoscaling:TerminateInstanceInAutoScalingGroup", 156 | "autoscaling:TerminateInstanceInAutoScalingGroup", 157 | "ec2:DescribeLaunchTemplateVersions", 158 | "ecr:*" 159 | ] 160 | resources = ["*"] 161 | } 162 | } 163 | 164 | #Module : IAM INSTANCE PROFILE 165 | #Description : Provides an IAM instance profile. 166 | resource "aws_iam_instance_profile" "default" { 167 | count = var.enabled ? 
1 : 0 168 | name = format("%s-instance-profile", module.labels.id) 169 | role = aws_iam_role.node_groups[0].name 170 | } -------------------------------------------------------------------------------- /kms.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "cloudwatch" { 2 | policy_id = "key-policy-cloudwatch" 3 | statement { 4 | sid = "Enable IAM User Permissions" 5 | actions = [ 6 | "kms:*", 7 | ] 8 | effect = "Allow" 9 | principals { 10 | type = "AWS" 11 | identifiers = [ 12 | format( 13 | "arn:%s:iam::%s:root", 14 | data.aws_partition.current.partition, 15 | data.aws_caller_identity.current.account_id 16 | ) 17 | ] 18 | } 19 | resources = ["*"] 20 | } 21 | statement { 22 | sid = "AllowCloudWatchLogs" 23 | actions = [ 24 | "kms:Encrypt*", 25 | "kms:Decrypt*", 26 | "kms:ReEncrypt*", 27 | "kms:GenerateDataKey*", 28 | "kms:Describe*" 29 | ] 30 | effect = "Allow" 31 | principals { 32 | type = "Service" 33 | identifiers = [ 34 | format( 35 | "logs.%s.amazonaws.com", 36 | data.aws_region.current.name 37 | ) 38 | ] 39 | } 40 | resources = ["*"] 41 | } 42 | } 43 | 44 | resource "aws_kms_key" "cluster" { 45 | count = var.enabled && var.cluster_encryption_config_enabled ? 1 : 0 46 | description = "EKS Cluster ${module.labels.id} Encryption Config KMS Key" 47 | enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation 48 | deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days 49 | policy = var.cluster_encryption_config_kms_key_policy 50 | tags = module.labels.tags 51 | } 52 | 53 | resource "aws_kms_key" "cloudwatch_log" { 54 | count = var.enabled && var.cluster_encryption_config_enabled ? 1 : 0 55 | description = "CloudWatch log group ${module.labels.id} Encryption Config KMS Key" 56 | enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation 57 | deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days 58 | policy = data.aws_iam_policy_document.cloudwatch.json 59 | tags = module.labels.tags 60 | } -------------------------------------------------------------------------------- /kubeconfig.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | preferences: {} 4 | clusters: 5 | - cluster: 6 | server: ${server} 7 | certificate-authority-data: ${certificate_authority_data} 8 | name: ${cluster_name} 9 | contexts: 10 | - context: 11 | cluster: ${cluster_name} 12 | user: ${cluster_name} 13 | name: ${cluster_name} 14 | current-context: ${cluster_name} 15 | users: 16 | - name: ${cluster_name} 17 | user: 18 | exec: 19 | apiVersion: client.authentication.k8s.io/v1alpha1 20 | command: aws-iam-authenticator 21 | args: 22 | - "token" 23 | - "-i" 24 | - "${cluster_name}" -------------------------------------------------------------------------------- /locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Encryption 3 | cluster_encryption_config = { 4 | resources = var.cluster_encryption_config_resources 5 | provider_key_arn = var.enabled ? 
aws_kms_key.cluster[0].arn : null 6 | } 7 | aws_policy_prefix = format("arn:%s:iam::aws:policy", data.aws_partition.current.partition) 8 | create_outposts_local_cluster = length(var.outpost_config) > 0 9 | 10 | } 11 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | 2 | #Module : label 3 | #Description : Terraform module to create consistent naming for multiple names. 4 | 5 | module "labels" { 6 | source = "clouddrove/labels/aws" 7 | version = "1.3.0" 8 | 9 | name = var.name 10 | repository = var.repository 11 | environment = var.environment 12 | managedby = var.managedby 13 | attributes = compact(concat(var.attributes, ["cluster"])) 14 | extra_tags = var.tags 15 | label_order = var.label_order 16 | } 17 | 18 | #Cloudwatch: Logs for Eks cluster 19 | resource "aws_cloudwatch_log_group" "default" { 20 | count = var.enabled && length(var.enabled_cluster_log_types) > 0 ? 1 : 0 21 | name = "/aws/eks/${module.labels.id}/cluster" 22 | retention_in_days = var.cluster_log_retention_period 23 | tags = module.labels.tags 24 | kms_key_id = aws_kms_key.cloudwatch_log[0].arn 25 | } 26 | 27 | #tfsec:ignore:aws-eks-no-public-cluster-access ## To provide eks endpoint public access from local network 28 | #tfsec:ignore:aws-eks-no-public-cluster-access-to-cidr ## To provide eks endpoint public access from local network 29 | resource "aws_eks_cluster" "default" { 30 | count = var.enabled ? 1 : 0 31 | name = module.labels.id 32 | role_arn = aws_iam_role.default[0].arn 33 | version = var.kubernetes_version 34 | enabled_cluster_log_types = var.enabled_cluster_log_types 35 | 36 | vpc_config { 37 | subnet_ids = var.subnet_ids 38 | endpoint_private_access = var.endpoint_private_access 39 | endpoint_public_access = var.endpoint_public_access 40 | public_access_cidrs = var.public_access_cidrs 41 | security_group_ids = var.eks_additional_security_group_ids 42 | } 43 | 44 | dynamic "encryption_config" { 45 | for_each = var.cluster_encryption_config_enabled ? [local.cluster_encryption_config] : [] 46 | content { 47 | resources = lookup(encryption_config.value, "resources") 48 | provider { 49 | key_arn = lookup(encryption_config.value, "provider_key_arn") 50 | } 51 | } 52 | } 53 | 54 | timeouts { 55 | create = lookup(var.cluster_timeouts, "create", null) 56 | update = lookup(var.cluster_timeouts, "update", null) 57 | delete = lookup(var.cluster_timeouts, "delete", null) 58 | } 59 | 60 | dynamic "kubernetes_network_config" { 61 | # Not valid on Outposts 62 | for_each = local.create_outposts_local_cluster ? [] : [1] 63 | 64 | content { 65 | ip_family = var.cluster_ip_family 66 | service_ipv4_cidr = var.cluster_service_ipv4_cidr 67 | service_ipv6_cidr = var.cluster_service_ipv6_cidr 68 | } 69 | } 70 | 71 | dynamic "outpost_config" { 72 | for_each = local.create_outposts_local_cluster ? [var.outpost_config] : [] 73 | 74 | content { 75 | control_plane_instance_type = outpost_config.value.control_plane_instance_type 76 | outpost_arns = outpost_config.value.outpost_arns 77 | } 78 | } 79 | 80 | tags = merge( 81 | module.labels.tags, 82 | var.eks_tags 83 | ) 84 | 85 | depends_on = [ 86 | aws_iam_role_policy_attachment.amazon_eks_cluster_policy, 87 | aws_iam_role_policy_attachment.amazon_eks_service_policy, 88 | aws_cloudwatch_log_group.default, 89 | ] 90 | } 91 | 92 | data "tls_certificate" "cluster" { 93 | count = var.enabled && var.oidc_provider_enabled ? 
1 : 0 94 | url = aws_eks_cluster.default[0].identity.0.oidc.0.issuer 95 | } 96 | 97 | resource "aws_iam_openid_connect_provider" "default" { 98 | count = var.enabled && var.oidc_provider_enabled ? 1 : 0 99 | url = aws_eks_cluster.default[0].identity.0.oidc.0.issuer 100 | 101 | client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences))) 102 | thumbprint_list = [data.tls_certificate.cluster[0].certificates.0.sha1_fingerprint] 103 | tags = module.labels.tags 104 | } 105 | 106 | resource "aws_eks_addon" "cluster" { 107 | for_each = var.enabled ? { for addon in var.addons : addon.addon_name => addon } : {} 108 | 109 | cluster_name = aws_eks_cluster.default[0].name 110 | addon_name = each.key 111 | addon_version = lookup(each.value, "addon_version", null) 112 | resolve_conflicts_on_create = lookup(each.value, "resolve_conflicts", null) 113 | resolve_conflicts_on_update = lookup(each.value, "resolve_conflicts", null) 114 | service_account_role_arn = lookup(each.value, "service_account_role_arn", null) 115 | 116 | tags = module.labels.tags 117 | } 118 | -------------------------------------------------------------------------------- /node_group/aws_managed/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" {} 2 | 3 | data "aws_caller_identity" "current" {} 4 | 5 | #Module : label 6 | #Description : Terraform module to create consistent naming for multiple names. 7 | module "labels" { 8 | source = "clouddrove/labels/aws" 9 | version = "1.3.0" 10 | 11 | name = var.name 12 | repository = var.repository 13 | environment = var.environment 14 | managedby = var.managedby 15 | extra_tags = var.tags 16 | attributes = compact(concat(var.attributes, ["nodes"])) 17 | label_order = var.label_order 18 | } 19 | 20 | 21 | ################################################################################ 22 | # Launch template 23 | ################################################################################ 24 | 25 | 26 | 27 | resource "aws_launch_template" "this" { 28 | count = var.enabled ? 
1 : 0 29 | name = module.labels.id 30 | description = var.launch_template_description 31 | 32 | ebs_optimized = var.ebs_optimized 33 | image_id = var.ami_id 34 | # # Set on node group instead 35 | # instance_type = var.launch_template_instance_type 36 | key_name = var.key_name 37 | user_data = var.before_cluster_joining_userdata 38 | vpc_security_group_ids = var.vpc_security_group_ids 39 | 40 | disable_api_termination = var.disable_api_termination 41 | kernel_id = var.kernel_id 42 | ram_disk_id = var.ram_disk_id 43 | 44 | dynamic "block_device_mappings" { 45 | for_each = var.block_device_mappings 46 | content { 47 | device_name = block_device_mappings.value.device_name 48 | no_device = lookup(block_device_mappings.value, "no_device", null) 49 | virtual_name = lookup(block_device_mappings.value, "virtual_name", null) 50 | 51 | dynamic "ebs" { 52 | for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) 53 | content { 54 | delete_on_termination = true 55 | encrypted = true 56 | kms_key_id = var.kms_key_id 57 | iops = lookup(ebs.value, "iops", null) 58 | throughput = lookup(ebs.value, "throughput", null) 59 | snapshot_id = lookup(ebs.value, "snapshot_id", null) 60 | volume_size = lookup(ebs.value, "volume_size", null) 61 | volume_type = lookup(ebs.value, "volume_type", null) 62 | } 63 | } 64 | } 65 | } 66 | 67 | dynamic "capacity_reservation_specification" { 68 | for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : [] 69 | content { 70 | capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) 71 | 72 | dynamic "capacity_reservation_target" { 73 | for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", []) 74 | content { 75 | capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) 76 | } 77 | } 78 | } 79 | } 80 | 81 | dynamic "cpu_options" { 82 | for_each = var.cpu_options != null ? [var.cpu_options] : [] 83 | content { 84 | core_count = cpu_options.value.core_count 85 | threads_per_core = cpu_options.value.threads_per_core 86 | } 87 | } 88 | 89 | dynamic "credit_specification" { 90 | for_each = var.credit_specification != null ? [var.credit_specification] : [] 91 | content { 92 | cpu_credits = credit_specification.value.cpu_credits 93 | } 94 | } 95 | 96 | dynamic "elastic_gpu_specifications" { 97 | for_each = var.elastic_gpu_specifications != null ? [var.elastic_gpu_specifications] : [] 98 | content { 99 | type = elastic_gpu_specifications.value.type 100 | } 101 | } 102 | 103 | dynamic "elastic_inference_accelerator" { 104 | for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : [] 105 | content { 106 | type = elastic_inference_accelerator.value.type 107 | } 108 | } 109 | 110 | dynamic "enclave_options" { 111 | for_each = var.enclave_options != null ? [var.enclave_options] : [] 112 | content { 113 | enabled = enclave_options.value.enabled 114 | } 115 | } 116 | 117 | 118 | dynamic "license_specification" { 119 | for_each = var.license_specifications != null ? [var.license_specifications] : [] 120 | content { 121 | license_configuration_arn = license_specifications.value.license_configuration_arn 122 | } 123 | } 124 | 125 | dynamic "metadata_options" { 126 | for_each = var.metadata_options != null ? 
[var.metadata_options] : [] 127 | content { 128 | http_endpoint = lookup(metadata_options.value, "http_endpoint", null) 129 | http_tokens = lookup(metadata_options.value, "http_tokens", null) 130 | http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) 131 | http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) 132 | instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) 133 | } 134 | } 135 | 136 | dynamic "monitoring" { 137 | for_each = var.enable_monitoring != null ? [1] : [] 138 | content { 139 | enabled = var.enable_monitoring 140 | } 141 | } 142 | 143 | dynamic "network_interfaces" { 144 | for_each = var.network_interfaces 145 | content { 146 | associate_carrier_ip_address = lookup(network_interfaces.value, "associate_carrier_ip_address", null) 147 | associate_public_ip_address = lookup(network_interfaces.value, "associate_public_ip_address", null) 148 | delete_on_termination = lookup(network_interfaces.value, "delete_on_termination", null) 149 | description = lookup(network_interfaces.value, "description", null) 150 | device_index = lookup(network_interfaces.value, "device_index", null) 151 | ipv4_addresses = lookup(network_interfaces.value, "ipv4_addresses", null) != null ? network_interfaces.value.ipv4_addresses : [] 152 | ipv4_address_count = lookup(network_interfaces.value, "ipv4_address_count", null) 153 | ipv6_addresses = lookup(network_interfaces.value, "ipv6_addresses", null) != null ? network_interfaces.value.ipv6_addresses : [] 154 | ipv6_address_count = lookup(network_interfaces.value, "ipv6_address_count", null) 155 | network_interface_id = lookup(network_interfaces.value, "network_interface_id", null) 156 | private_ip_address = lookup(network_interfaces.value, "private_ip_address", null) 157 | security_groups = lookup(network_interfaces.value, "security_groups", null) != null ? network_interfaces.value.security_groups : [] 158 | # Set on EKS managed node group, will fail if set here 159 | # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics 160 | # subnet_id = lookup(network_interfaces.value, "subnet_id", null) 161 | } 162 | } 163 | 164 | dynamic "placement" { 165 | for_each = var.placement != null ? [var.placement] : [] 166 | content { 167 | affinity = lookup(placement.value, "affinity", null) 168 | availability_zone = lookup(placement.value, "availability_zone", null) 169 | group_name = lookup(placement.value, "group_name", null) 170 | host_id = lookup(placement.value, "host_id", null) 171 | spread_domain = lookup(placement.value, "spread_domain", null) 172 | tenancy = lookup(placement.value, "tenancy", null) 173 | partition_number = lookup(placement.value, "partition_number", null) 174 | } 175 | } 176 | 177 | dynamic "tag_specifications" { 178 | for_each = toset(["instance", "volume", "network-interface"]) 179 | content { 180 | resource_type = tag_specifications.key 181 | tags = merge( 182 | module.labels.tags, 183 | { Name = module.labels.id }) 184 | } 185 | } 186 | 187 | 188 | lifecycle { 189 | create_before_destroy = true 190 | } 191 | 192 | tags = module.labels.tags 193 | } 194 | 195 | ################################################################################ 196 | # Node Group 197 | ################################################################################ 198 | 199 | resource "aws_eks_node_group" "this" { 200 | count = var.enabled ? 
1 : 0 201 | 202 | # Required 203 | cluster_name = var.cluster_name 204 | node_role_arn = var.iam_role_arn 205 | subnet_ids = var.subnet_ids 206 | 207 | scaling_config { 208 | min_size = var.min_size 209 | max_size = var.max_size 210 | desired_size = var.desired_size 211 | } 212 | 213 | # Optional 214 | node_group_name = module.labels.id 215 | 216 | # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami 217 | ami_type = var.ami_id != "" ? null : var.ami_type 218 | release_version = var.ami_id != "" ? null : var.ami_release_version 219 | version = var.ami_id != "" ? null : var.cluster_version 220 | 221 | capacity_type = var.capacity_type 222 | disk_size = var.disk_size 223 | force_update_version = var.force_update_version 224 | instance_types = var.instance_types 225 | labels = var.labels 226 | 227 | dynamic "launch_template" { 228 | for_each = var.enabled ? [1] : [] 229 | content { 230 | name = try(aws_launch_template.this[0].name) 231 | version = try(aws_launch_template.this[0].latest_version) 232 | } 233 | } 234 | 235 | dynamic "remote_access" { 236 | for_each = length(var.remote_access) > 0 ? [var.remote_access] : [] 237 | content { 238 | ec2_ssh_key = try(remote_access.value.ec2_ssh_key, null) 239 | source_security_group_ids = try(remote_access.value.source_security_group_ids, []) 240 | } 241 | } 242 | 243 | dynamic "taint" { 244 | for_each = var.taints 245 | content { 246 | key = taint.value.key 247 | value = lookup(taint.value, "value") 248 | effect = taint.value.effect 249 | } 250 | } 251 | 252 | dynamic "update_config" { 253 | for_each = length(var.update_config) > 0 ? [var.update_config] : [] 254 | content { 255 | max_unavailable_percentage = try(update_config.value.max_unavailable_percentage, null) 256 | max_unavailable = try(update_config.value.max_unavailable, null) 257 | } 258 | } 259 | 260 | timeouts { 261 | create = lookup(var.timeouts, "create", null) 262 | update = lookup(var.timeouts, "update", null) 263 | delete = lookup(var.timeouts, "delete", null) 264 | } 265 | 266 | lifecycle { 267 | create_before_destroy = true 268 | ignore_changes = [ 269 | scaling_config[0].desired_size, 270 | ] 271 | } 272 | 273 | tags = module.labels.tags 274 | } 275 | 276 | #-----------------------------------------------ASG-Schedule---------------------------------------------------------------- 277 | 278 | resource "aws_autoscaling_schedule" "this" { 279 | for_each = var.enabled && var.create_schedule ? 
var.schedules : {} 280 | 281 | scheduled_action_name = each.key 282 | autoscaling_group_name = aws_eks_node_group.this[0].resources[0].autoscaling_groups[0].name 283 | 284 | min_size = lookup(each.value, "min_size", null) 285 | max_size = lookup(each.value, "max_size", null) 286 | desired_capacity = lookup(each.value, "desired_size", null) 287 | start_time = lookup(each.value, "start_time", null) 288 | end_time = lookup(each.value, "end_time", null) 289 | time_zone = lookup(each.value, "time_zone", null) 290 | 291 | # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] 292 | # Cron examples: https://crontab.guru/examples.html 293 | recurrence = lookup(each.value, "recurrence", null) 294 | } -------------------------------------------------------------------------------- /node_group/aws_managed/outputs.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Launch template 3 | ################################################################################ 4 | 5 | output "launch_template_id" { 6 | description = "The ID of the launch template" 7 | value = try(aws_launch_template.this[0].id, "") 8 | } 9 | 10 | output "launch_template_arn" { 11 | description = "The ARN of the launch template" 12 | value = try(aws_launch_template.this[0].arn, "") 13 | } 14 | 15 | output "launch_template_latest_version" { 16 | description = "The latest version of the launch template" 17 | value = try(aws_launch_template.this[0].latest_version, "") 18 | } 19 | 20 | ################################################################################ 21 | # Node Group 22 | ################################################################################ 23 | 24 | output "node_group_arn" { 25 | description = "Amazon Resource Name (ARN) of the EKS Node Group" 26 | value = try(aws_eks_node_group.this[0].arn, "") 27 | } 28 | 29 | output "node_group_id" { 30 | description = "EKS Cluster name and EKS Node Group name separated by a colon (`:`)" 31 | value = try(aws_eks_node_group.this[0].id, "") 32 | } 33 | 34 | output "node_group_resources" { 35 | description = "List of objects containing information about underlying resources" 36 | value = try(aws_eks_node_group.this[0].resources, "") 37 | } 38 | 39 | output "node_group_status" { 40 | description = "Status of the EKS Node Group" 41 | value = try(aws_eks_node_group.this[0].status, "") 42 | } 43 | -------------------------------------------------------------------------------- /node_group/aws_managed/variables.tf: -------------------------------------------------------------------------------- 1 | #Module : LABEL 2 | #Description : Terraform label module variables. 3 | variable "name" { 4 | type = string 5 | default = "" 6 | description = "Name (e.g. `app` or `cluster`)." 7 | } 8 | 9 | variable "repository" { 10 | type = string 11 | default = "https://github.com/clouddrove/terraform-aws-eks" 12 | description = "Terraform current module repo" 13 | } 14 | 15 | variable "environment" { 16 | type = string 17 | default = "" 18 | description = "Environment (e.g. `prod`, `dev`, `staging`)." 19 | } 20 | 21 | variable "label_order" { 22 | type = list(any) 23 | default = [] 24 | description = "Label order, e.g. `name`,`application`." 25 | } 26 | 27 | variable "managedby" { 28 | type = string 29 | default = "hello@clouddrove.com" 30 | description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'."
31 | } 32 | 33 | variable "attributes" { 34 | type = list(any) 35 | default = [] 36 | description = "Additional attributes (e.g. `1`)." 37 | } 38 | 39 | variable "tags" { 40 | type = map(any) 41 | default = {} 42 | description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 43 | } 44 | 45 | 46 | variable "enabled" { 47 | type = bool 48 | default = true 49 | description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." 50 | } 51 | 52 | variable "cluster_name" { 53 | description = "Name of associated EKS cluster" 54 | type = string 55 | default = null 56 | } 57 | 58 | #-----------------------------------------------------------Launch_Template--------------------------------------------------------- 59 | 60 | variable "launch_template_description" { 61 | description = "Description of the launch template" 62 | type = string 63 | default = null 64 | } 65 | 66 | variable "ebs_optimized" { 67 | description = "If true, the launched EC2 instance(s) will be EBS-optimized" 68 | type = bool 69 | default = null 70 | } 71 | 72 | variable "ami_id" { 73 | description = "The AMI from which to launch the instance. If not supplied, EKS will use its own default image" 74 | type = string 75 | default = "" 76 | } 77 | 78 | variable "key_name" { 79 | description = "The key name that should be used for the instance(s)" 80 | type = string 81 | default = null 82 | } 83 | 84 | variable "vpc_security_group_ids" { 85 | description = "A list of security group IDs to associate" 86 | type = list(string) 87 | default = [] 88 | } 89 | 90 | variable "launch_template_default_version" { 91 | description = "Default version of the launch template" 92 | type = string 93 | default = null 94 | } 95 | 96 | variable "update_launch_template_default_version" { 97 | description = "Whether to update the launch templates default version on each update. 
Conflicts with `launch_template_default_version`" 98 | type = bool 99 | default = true 100 | } 101 | 102 | variable "disable_api_termination" { 103 | description = "If true, enables EC2 instance termination protection" 104 | type = bool 105 | default = null 106 | } 107 | 108 | variable "kernel_id" { 109 | description = "The kernel ID" 110 | type = string 111 | default = null 112 | } 113 | 114 | variable "ram_disk_id" { 115 | description = "The ID of the ram disk" 116 | type = string 117 | default = null 118 | } 119 | 120 | variable "block_device_mappings" { 121 | description = "Specify volumes to attach to the instance besides the volumes specified by the AMI" 122 | type = any 123 | default = {} 124 | } 125 | 126 | variable "capacity_reservation_specification" { 127 | description = "Targeting for EC2 capacity reservations" 128 | type = any 129 | default = null 130 | } 131 | 132 | variable "cpu_options" { 133 | description = "The CPU options for the instance" 134 | type = map(string) 135 | default = null 136 | } 137 | 138 | variable "credit_specification" { 139 | description = "Customize the credit specification of the instance" 140 | type = map(string) 141 | default = null 142 | } 143 | 144 | variable "elastic_gpu_specifications" { 145 | description = "The elastic GPU to attach to the instance" 146 | type = map(string) 147 | default = null 148 | } 149 | 150 | variable "elastic_inference_accelerator" { 151 | description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance" 152 | type = map(string) 153 | default = null 154 | } 155 | 156 | variable "enclave_options" { 157 | description = "Enable Nitro Enclaves on launched instances" 158 | type = map(string) 159 | default = null 160 | } 161 | 162 | variable "instance_market_options" { 163 | description = "The market (purchasing) option for the instance" 164 | type = any 165 | default = null 166 | } 167 | 168 | variable "license_specifications" { 169 | description = "A list of license specifications to associate with" 170 | type = map(string) 171 | default = null 172 | } 173 | 174 | variable "metadata_options" { 175 | description = "Customize the metadata options for the instance" 176 | type = map(string) 177 | default = { 178 | http_endpoint = "enabled" 179 | http_tokens = "required" 180 | http_put_response_hop_limit = 2 181 | } 182 | } 183 | 184 | variable "kms_key_id" { 185 | type = string 186 | default = null 187 | description = "The KMS ID of EBS volume" 188 | } 189 | 190 | 191 | variable "enable_monitoring" { 192 | description = "Enables/disables detailed monitoring" 193 | type = bool 194 | default = false 195 | } 196 | 197 | variable "network_interfaces" { 198 | description = "Customize network interfaces to be attached at instance boot time" 199 | type = list(any) 200 | default = [] 201 | } 202 | 203 | variable "placement" { 204 | description = "The placement of the instance" 205 | type = map(string) 206 | default = null 207 | } 208 | 209 | variable "launch_template_tags" { 210 | description = "A map of additional tags to add to the tag_specifications of launch template created" 211 | type = map(string) 212 | default = {} 213 | } 214 | 215 | #EKS_Managed_Node_Group 216 | 217 | variable "subnet_ids" { 218 | description = "Identifiers of EC2 Subnets to associate with the EKS Node Group. 
These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME`" 219 | type = list(string) 220 | default = null 221 | } 222 | 223 | variable "min_size" { 224 | description = "Minimum number of instances/nodes" 225 | type = number 226 | default = 0 227 | } 228 | 229 | variable "max_size" { 230 | description = "Maximum number of instances/nodes" 231 | type = number 232 | default = 3 233 | } 234 | 235 | variable "desired_size" { 236 | description = "Desired number of instances/nodes" 237 | type = number 238 | default = 1 239 | } 240 | 241 | variable "ami_type" { 242 | description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64`" 243 | type = string 244 | default = null 245 | } 246 | 247 | variable "ami_release_version" { 248 | description = "AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version" 249 | type = string 250 | default = null 251 | } 252 | 253 | variable "capacity_type" { 254 | description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`" 255 | type = string 256 | default = "ON_DEMAND" 257 | } 258 | 259 | variable "disk_size" { 260 | description = "Disk size in GiB for nodes. Defaults to `20`" 261 | type = number 262 | default = null 263 | } 264 | 265 | variable "force_update_version" { 266 | description = "Force version update if existing pods are unable to be drained due to a pod disruption budget issue" 267 | type = bool 268 | default = null 269 | } 270 | 271 | variable "iam_role_arn" { 272 | type = string 273 | default = "" 274 | description = "" 275 | } 276 | 277 | 278 | variable "instance_types" { 279 | description = "Set of instance types associated with the EKS Node Group. Defaults to `[\"t3.medium\"]`" 280 | type = list(string) 281 | default = null 282 | } 283 | 284 | variable "labels" { 285 | description = "Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed" 286 | type = map(string) 287 | default = null 288 | } 289 | 290 | variable "cluster_version" { 291 | description = "Kubernetes version. Defaults to EKS Cluster Kubernetes version" 292 | type = string 293 | default = null 294 | } 295 | 296 | variable "remote_access" { 297 | description = "Configuration block with remote access settings" 298 | type = any 299 | default = {} 300 | } 301 | 302 | variable "taints" { 303 | description = "The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group" 304 | type = any 305 | default = {} 306 | } 307 | 308 | variable "update_config" { 309 | description = "Configuration block of settings for max unavailable resources during node group updates" 310 | type = map(string) 311 | default = {} 312 | } 313 | 314 | variable "timeouts" { 315 | description = "Create, update, and delete timeout configurations for the node group" 316 | type = map(string) 317 | default = {} 318 | } 319 | 320 | variable "before_cluster_joining_userdata" { 321 | type = string 322 | default = "" 323 | description = "Additional commands to execute on each worker node before joining the EKS cluster (before executing the `bootstrap.sh` script). 
For more info, see https://kubedex.com/90-days-of-aws-eks-in-test" 324 | } 325 | 326 | #-----------------------------------------------ASG-Schedule---------------------------------------------------------------- 327 | 328 | variable "create_schedule" { 329 | description = "Determines whether to create autoscaling group schedule or not" 330 | type = bool 331 | default = true 332 | } 333 | 334 | variable "schedules" { 335 | description = "Map of autoscaling group schedule to create" 336 | type = map(any) 337 | default = {} 338 | } 339 | -------------------------------------------------------------------------------- /node_group/aws_managed/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.5.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 5.11.0" 8 | } 9 | cloudinit = { 10 | source = "hashicorp/cloudinit" 11 | version = ">= 2.0" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /node_group/fargate_profile/fargate.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 3.1.15" 6 | } 7 | } 8 | } 9 | 10 | #Module : label 11 | #Description : Terraform module to create consistent naming for multiple names. 12 | module "labels" { 13 | source = "clouddrove/labels/aws" 14 | version = "1.3.0" 15 | 16 | name = var.name 17 | environment = var.environment 18 | managedby = var.managedby 19 | delimiter = var.delimiter 20 | attributes = compact(concat(var.attributes, ["fargate"])) 21 | label_order = var.label_order 22 | } 23 | 24 | 25 | #Module : IAM ROLE 26 | #Description : Provides an IAM role. 27 | resource "aws_iam_role" "fargate_role" { 28 | count = var.enabled && var.fargate_enabled ? 1 : 0 29 | 30 | name = format("%s-fargate-role", module.labels.id) 31 | assume_role_policy = data.aws_iam_policy_document.aws_eks_fargate_policy[0].json 32 | tags = module.labels.tags 33 | } 34 | 35 | resource "aws_iam_role_policy_attachment" "amazon_eks_fargate_pod_execution_role_policy" { 36 | count = var.enabled && var.fargate_enabled ? 1 : 0 37 | 38 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy" 39 | role = aws_iam_role.fargate_role[0].name 40 | } 41 | 42 | #Module : EKS Fargate 43 | #Descirption : Enabling fargate for AWS EKS 44 | resource "aws_eks_fargate_profile" "default" { 45 | for_each = var.enabled && var.fargate_enabled ? var.fargate_profiles : {} 46 | 47 | cluster_name = var.cluster_name 48 | fargate_profile_name = format("%s-%s", module.labels.id, each.value.addon_name) 49 | pod_execution_role_arn = aws_iam_role.fargate_role[0].arn 50 | subnet_ids = var.subnet_ids 51 | tags = module.labels.tags 52 | 53 | selector { 54 | namespace = lookup(each.value, "namespace", "default") 55 | labels = lookup(each.value, "labels", null) 56 | } 57 | } 58 | 59 | # AWS EKS Fargate policy 60 | data "aws_iam_policy_document" "aws_eks_fargate_policy" { 61 | count = var.enabled && var.fargate_enabled ? 
1 : 0 62 | 63 | statement { 64 | effect = "Allow" 65 | actions = ["sts:AssumeRole"] 66 | 67 | principals { 68 | type = "Service" 69 | identifiers = ["eks-fargate-pods.amazonaws.com"] 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /node_group/fargate_profile/variables.tf: -------------------------------------------------------------------------------- 1 | #Module : LABEL 2 | #Description : Terraform label module variables. 3 | variable "name" { 4 | type = string 5 | default = "" 6 | description = "Name (e.g. `app` or `cluster`)." 7 | } 8 | 9 | variable "environment" { 10 | type = string 11 | default = "" 12 | description = "Environment (e.g. `prod`, `dev`, `staging`)." 13 | } 14 | 15 | variable "label_order" { 16 | type = list(any) 17 | default = [] 18 | description = "Label order, e.g. `name`,`application`." 19 | } 20 | 21 | variable "attributes" { 22 | type = list(any) 23 | default = [] 24 | description = "Additional attributes (e.g. `1`)." 25 | } 26 | 27 | variable "tags" { 28 | type = map(any) 29 | default = {} 30 | description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 31 | } 32 | 33 | variable "managedby" { 34 | type = string 35 | default = "hello@clouddrove.com" 36 | description = "ManagedBy, eg 'CloudDrove'." 37 | } 38 | 39 | variable "delimiter" { 40 | type = string 41 | default = "-" 42 | description = "Delimiter to be used between `organization`, `environment`, `name` and `attributes`." 43 | } 44 | 45 | variable "enabled" { 46 | type = bool 47 | default = true 48 | description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." 49 | } 50 | 51 | variable "fargate_enabled" { 52 | type = bool 53 | default = false 54 | description = "Whether fargate profile is enabled or not" 55 | } 56 | 57 | variable "fargate_profiles" { 58 | type = map(any) 59 | default = {} 60 | description = "Map of Fargate profile configurations to create." 61 | } 62 | 63 | variable "cluster_name" { 64 | type = string 65 | default = "" 66 | description = "The name of the EKS cluster." 67 | } 68 | 69 | variable "subnet_ids" { 70 | type = list(string) 71 | description = "A list of subnet IDs to launch resources in." 72 | } -------------------------------------------------------------------------------- /node_group/self_managed/_userdata.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /etc/eks/bootstrap.sh --apiserver-endpoint '${cluster_endpoint}' --b64-cluster-ca '${certificate_authority_data}' ${bootstrap_extra_args} '${cluster_name}' 3 | -------------------------------------------------------------------------------- /node_group/self_managed/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | self_managed_node_group_default_tags = { 3 | "Name" = "${module.labels.id}" 4 | "Environment" = "${var.environment}" 5 | "kubernetes.io/cluster/${var.cluster_name}" = "owned" 6 | "k8s.io/cluster/${var.cluster_name}" = "owned" 7 | } 8 | } 9 | 10 | data "aws_partition" "current" {} 11 | 12 | data "aws_caller_identity" "current" {} 13 | 14 | 15 | #AMI AMAZON LINUX 16 | data "aws_ami" "eks_default" { 17 | count = var.enabled ? 1 : 0 18 | 19 | filter { 20 | name = "name" 21 | values = ["amazon-eks-node-${var.kubernetes_version}-v*"] 22 | } 23 | 24 | most_recent = true 25 | owners = ["amazon"] 26 | } 27 | 28 | data "template_file" "userdata" { 29 | count = var.enabled ? 
1 : 0 30 | template = file("${path.module}/_userdata.tpl") 31 | 32 | vars = { 33 | cluster_endpoint = var.cluster_endpoint 34 | certificate_authority_data = var.cluster_auth_base64 35 | cluster_name = var.cluster_name 36 | bootstrap_extra_args = var.bootstrap_extra_args 37 | 38 | } 39 | } 40 | #Module : label 41 | #Description : Terraform module to create consistent naming for multiple names. 42 | module "labels" { 43 | source = "clouddrove/labels/aws" 44 | version = "1.3.0" 45 | 46 | name = var.name 47 | repository = var.repository 48 | environment = var.environment 49 | managedby = var.managedby 50 | extra_tags = var.tags 51 | attributes = compact(concat(var.attributes, ["nodes"])) 52 | label_order = var.label_order 53 | } 54 | 55 | 56 | resource "aws_launch_template" "this" { 57 | count = var.enabled ? 1 : 0 58 | name = module.labels.id 59 | 60 | ebs_optimized = var.ebs_optimized 61 | image_id = data.aws_ami.eks_default[0].image_id 62 | instance_type = var.instance_type 63 | key_name = var.key_name 64 | user_data = base64encode(data.template_file.userdata[0].rendered) 65 | disable_api_termination = var.disable_api_termination 66 | instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior 67 | kernel_id = var.kernel_id 68 | ram_disk_id = var.ram_disk_id 69 | 70 | 71 | #volumes 72 | dynamic "block_device_mappings" { 73 | for_each = var.block_device_mappings 74 | content { 75 | device_name = block_device_mappings.value.device_name 76 | no_device = lookup(block_device_mappings.value, "no_device", null) 77 | virtual_name = lookup(block_device_mappings.value, "virtual_name", null) 78 | 79 | 80 | dynamic "ebs" { 81 | for_each = flatten([lookup(block_device_mappings.value, "ebs", [])]) 82 | content { 83 | delete_on_termination = true 84 | encrypted = true 85 | kms_key_id = var.kms_key_id 86 | iops = lookup(ebs.value, "iops", null) 87 | throughput = lookup(ebs.value, "throughput", null) 88 | snapshot_id = lookup(ebs.value, "snapshot_id", null) 89 | volume_size = lookup(ebs.value, "volume_size", null) 90 | volume_type = lookup(ebs.value, "volume_type", null) 91 | } 92 | } 93 | } 94 | } 95 | 96 | # capacity_reservation 97 | dynamic "capacity_reservation_specification" { 98 | for_each = var.capacity_reservation_specification != null ? [var.capacity_reservation_specification] : [] 99 | content { 100 | capacity_reservation_preference = lookup(capacity_reservation_specification.value, "capacity_reservation_preference", null) 101 | 102 | dynamic "capacity_reservation_target" { 103 | for_each = lookup(capacity_reservation_specification.value, "capacity_reservation_target", []) 104 | content { 105 | capacity_reservation_id = lookup(capacity_reservation_target.value, "capacity_reservation_id", null) 106 | } 107 | } 108 | } 109 | } 110 | 111 | #CPU option 112 | dynamic "cpu_options" { 113 | for_each = var.cpu_options != null ? [var.cpu_options] : [] 114 | content { 115 | core_count = cpu_options.value.core_count 116 | threads_per_core = cpu_options.value.threads_per_core 117 | } 118 | } 119 | 120 | #credit_specification 121 | dynamic "credit_specification" { 122 | for_each = var.credit_specification != null ? [var.credit_specification] : [] 123 | content { 124 | cpu_credits = credit_specification.value.cpu_credits 125 | } 126 | } 127 | 128 | dynamic "elastic_gpu_specifications" { 129 | for_each = var.elastic_gpu_specifications != null ? 
[var.elastic_gpu_specifications] : [] 130 | content { 131 | type = elastic_gpu_specifications.value.type 132 | } 133 | } 134 | 135 | dynamic "elastic_inference_accelerator" { 136 | for_each = var.elastic_inference_accelerator != null ? [var.elastic_inference_accelerator] : [] 137 | content { 138 | type = elastic_inference_accelerator.value.type 139 | } 140 | } 141 | 142 | dynamic "enclave_options" { 143 | for_each = var.enclave_options != null ? [var.enclave_options] : [] 144 | content { 145 | enabled = enclave_options.value.enabled 146 | } 147 | } 148 | 149 | dynamic "hibernation_options" { 150 | for_each = var.hibernation_options != null ? [var.hibernation_options] : [] 151 | content { 152 | configured = hibernation_options.value.configured 153 | } 154 | } 155 | 156 | iam_instance_profile { 157 | arn = var.iam_instance_profile_arn 158 | } 159 | 160 | 161 | dynamic "instance_market_options" { 162 | for_each = var.instance_market_options != null ? [var.instance_market_options] : [] 163 | content { 164 | market_type = instance_market_options.value.market_type 165 | 166 | dynamic "spot_options" { 167 | for_each = lookup(instance_market_options.value, "spot_options", null) != null ? [instance_market_options.value.spot_options] : [] 168 | content { 169 | block_duration_minutes = lookup(spot_options.value, "block_duration_minutes", null) 170 | instance_interruption_behavior = lookup(spot_options.value, "instance_interruption_behavior", null) 171 | max_price = lookup(spot_options.value, "max_price", null) 172 | spot_instance_type = lookup(spot_options.value, "spot_instance_type", null) 173 | valid_until = lookup(spot_options.value, "valid_until", null) 174 | } 175 | } 176 | } 177 | } 178 | 179 | dynamic "license_specification" { 180 | for_each = var.license_specifications != null ? [var.license_specifications] : [] 181 | content { 182 | license_configuration_arn = license_specifications.value.license_configuration_arn 183 | } 184 | } 185 | 186 | dynamic "metadata_options" { 187 | for_each = var.metadata_options != null ? [var.metadata_options] : [] 188 | content { 189 | http_endpoint = lookup(metadata_options.value, "http_endpoint", null) 190 | http_tokens = lookup(metadata_options.value, "http_tokens", null) 191 | http_put_response_hop_limit = lookup(metadata_options.value, "http_put_response_hop_limit", null) 192 | http_protocol_ipv6 = lookup(metadata_options.value, "http_protocol_ipv6", null) 193 | instance_metadata_tags = lookup(metadata_options.value, "instance_metadata_tags", null) 194 | } 195 | } 196 | 197 | dynamic "monitoring" { 198 | for_each = var.enable_monitoring != null ? [1] : [] 199 | content { 200 | enabled = var.enable_monitoring 201 | } 202 | } 203 | 204 | 205 | network_interfaces { 206 | description = module.labels.id 207 | device_index = 0 208 | associate_public_ip_address = var.associate_public_ip_address 209 | delete_on_termination = true 210 | security_groups = var.security_group_ids 211 | } 212 | 213 | dynamic "placement" { 214 | for_each = var.placement != null ? 
[var.placement] : [] 215 | content { 216 | affinity = lookup(placement.value, "affinity", null) 217 | availability_zone = lookup(placement.value, "availability_zone", null) 218 | group_name = lookup(placement.value, "group_name", null) 219 | host_id = lookup(placement.value, "host_id", null) 220 | spread_domain = lookup(placement.value, "spread_domain", null) 221 | tenancy = lookup(placement.value, "tenancy", null) 222 | partition_number = lookup(placement.value, "partition_number", null) 223 | } 224 | } 225 | 226 | 227 | dynamic "tag_specifications" { 228 | for_each = toset(["instance", "volume", "network-interface"]) 229 | content { 230 | resource_type = tag_specifications.key 231 | tags = merge( 232 | module.labels.tags, 233 | { Name = module.labels.id }) 234 | } 235 | } 236 | 237 | lifecycle { 238 | create_before_destroy = true 239 | } 240 | 241 | 242 | tags = module.labels.tags 243 | 244 | } 245 | 246 | 247 | resource "aws_autoscaling_group" "this" { 248 | count = var.enabled ? 1 : 0 249 | 250 | name = module.labels.id 251 | 252 | dynamic "launch_template" { 253 | for_each = var.use_mixed_instances_policy ? [] : [1] 254 | 255 | content { 256 | name = aws_launch_template.this[0].name 257 | version = aws_launch_template.this[0].latest_version 258 | } 259 | } 260 | 261 | availability_zones = var.availability_zones 262 | vpc_zone_identifier = var.subnet_ids 263 | 264 | min_size = var.min_size 265 | max_size = var.max_size 266 | desired_capacity = var.desired_size 267 | capacity_rebalance = var.capacity_rebalance 268 | min_elb_capacity = var.min_elb_capacity 269 | wait_for_elb_capacity = var.wait_for_elb_capacity 270 | wait_for_capacity_timeout = var.wait_for_capacity_timeout 271 | default_cooldown = var.default_cooldown 272 | protect_from_scale_in = var.protect_from_scale_in 273 | 274 | target_group_arns = var.target_group_arns 275 | placement_group = var.placement_group 276 | health_check_type = var.health_check_type 277 | health_check_grace_period = var.health_check_grace_period 278 | 279 | force_delete = var.force_delete 280 | termination_policies = var.termination_policies 281 | suspended_processes = var.suspended_processes 282 | max_instance_lifetime = var.max_instance_lifetime 283 | 284 | enabled_metrics = var.enabled_metrics 285 | metrics_granularity = var.metrics_granularity 286 | service_linked_role_arn = var.service_linked_role_arn 287 | 288 | dynamic "initial_lifecycle_hook" { 289 | for_each = var.initial_lifecycle_hooks 290 | content { 291 | name = initial_lifecycle_hook.value.name 292 | default_result = lookup(initial_lifecycle_hook.value, "default_result", null) 293 | heartbeat_timeout = lookup(initial_lifecycle_hook.value, "heartbeat_timeout", null) 294 | lifecycle_transition = initial_lifecycle_hook.value.lifecycle_transition 295 | notification_metadata = lookup(initial_lifecycle_hook.value, "notification_metadata", null) 296 | notification_target_arn = lookup(initial_lifecycle_hook.value, "notification_target_arn", null) 297 | role_arn = lookup(initial_lifecycle_hook.value, "role_arn", null) 298 | } 299 | } 300 | 301 | dynamic "instance_refresh" { 302 | for_each = var.instance_refresh != null ? [var.instance_refresh] : [] 303 | content { 304 | strategy = instance_refresh.value.strategy 305 | triggers = lookup(instance_refresh.value, "triggers", null) 306 | 307 | dynamic "preferences" { 308 | for_each = lookup(instance_refresh.value, "preferences", null) != null ? 
[instance_refresh.value.preferences] : [] 309 | content { 310 | instance_warmup = lookup(preferences.value, "instance_warmup", null) 311 | min_healthy_percentage = lookup(preferences.value, "min_healthy_percentage", null) 312 | checkpoint_delay = lookup(preferences.value, "checkpoint_delay", null) 313 | checkpoint_percentages = lookup(preferences.value, "checkpoint_percentages", null) 314 | } 315 | } 316 | } 317 | } 318 | 319 | dynamic "mixed_instances_policy" { 320 | for_each = var.use_mixed_instances_policy ? [var.mixed_instances_policy] : [] 321 | content { 322 | dynamic "instances_distribution" { 323 | for_each = try([mixed_instances_policy.value.instances_distribution], []) 324 | content { 325 | on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null) 326 | on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null) 327 | on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null) 328 | spot_allocation_strategy = lookup(instances_distribution.value, "spot_allocation_strategy", null) 329 | spot_instance_pools = lookup(instances_distribution.value, "spot_instance_pools", null) 330 | spot_max_price = lookup(instances_distribution.value, "spot_max_price", null) 331 | } 332 | } 333 | 334 | launch_template { 335 | launch_template_specification { 336 | launch_template_name = aws_launch_template.this[0].name 337 | version = aws_launch_template.this[0].latest_version 338 | } 339 | 340 | dynamic "override" { 341 | for_each = try(mixed_instances_policy.value.override, []) 342 | content { 343 | instance_type = lookup(override.value, "instance_type", null) 344 | weighted_capacity = lookup(override.value, "weighted_capacity", null) 345 | 346 | dynamic "launch_template_specification" { 347 | for_each = lookup(override.value, "launch_template_specification", null) != null ? override.value.launch_template_specification : [] 348 | content { 349 | launch_template_id = lookup(launch_template_specification.value, "launch_template_id", null) 350 | } 351 | } 352 | } 353 | } 354 | } 355 | } 356 | } 357 | 358 | dynamic "warm_pool" { 359 | for_each = var.warm_pool != null ? [var.warm_pool] : [] 360 | content { 361 | pool_state = lookup(warm_pool.value, "pool_state", null) 362 | min_size = lookup(warm_pool.value, "min_size", null) 363 | max_group_prepared_capacity = lookup(warm_pool.value, "max_group_prepared_capacity", null) 364 | } 365 | } 366 | 367 | timeouts { 368 | delete = var.cluster_delete_timeout 369 | } 370 | 371 | 372 | lifecycle { 373 | create_before_destroy = true 374 | ignore_changes = [ 375 | desired_capacity 376 | ] 377 | } 378 | 379 | dynamic "tag" { 380 | for_each = merge(local.self_managed_node_group_default_tags, var.tags) 381 | content { 382 | key = tag.key 383 | value = tag.value 384 | propagate_at_launch = true 385 | } 386 | } 387 | } 388 | 389 | #---------------------------------------------------ASG-schedule----------------------------------------------------------- 390 | 391 | resource "aws_autoscaling_schedule" "this" { 392 | for_each = var.enabled && var.create_schedule ? 
var.schedules : {} 393 | 394 | scheduled_action_name = each.key 395 | autoscaling_group_name = aws_autoscaling_group.this[0].name 396 | 397 | min_size = lookup(each.value, "min_size", null) 398 | max_size = lookup(each.value, "max_size", null) 399 | desired_capacity = lookup(each.value, "desired_size", null) 400 | start_time = lookup(each.value, "start_time", null) 401 | end_time = lookup(each.value, "end_time", null) 402 | time_zone = lookup(each.value, "time_zone", null) 403 | 404 | # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] 405 | # Cron examples: https://crontab.guru/examples.html 406 | recurrence = lookup(each.value, "recurrence", null) 407 | } 408 | 409 | 410 | 411 | -------------------------------------------------------------------------------- /node_group/self_managed/outputs.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Launch template 3 | ################################################################################ 4 | 5 | output "launch_template_id" { 6 | description = "The ID of the launch template" 7 | value = try(aws_launch_template.this[0].id, "") 8 | } 9 | 10 | output "launch_template_arn" { 11 | description = "The ARN of the launch template" 12 | value = try(aws_launch_template.this[0].arn, "") 13 | } 14 | 15 | output "launch_template_latest_version" { 16 | description = "The latest version of the launch template" 17 | value = try(aws_launch_template.this[0].latest_version, "") 18 | } 19 | 20 | ################################################################################ 21 | # autoscaling group 22 | ################################################################################ 23 | 24 | output "autoscaling_group_name" { 25 | description = "The autoscaling group name" 26 | value = try(aws_autoscaling_group.this[0].name, "") 27 | } 28 | 29 | output "autoscaling_group_arn" { 30 | description = "The ARN for this autoscaling group" 31 | value = try(aws_autoscaling_group.this[0].arn, "") 32 | } 33 | 34 | output "autoscaling_group_id" { 35 | description = "The autoscaling group id" 36 | value = try(aws_autoscaling_group.this[0].id, "") 37 | } 38 | 39 | output "autoscaling_group_min_size" { 40 | description = "The minimum size of the autoscaling group" 41 | value = try(aws_autoscaling_group.this[0].min_size, "") 42 | } 43 | 44 | output "autoscaling_group_max_size" { 45 | description = "The maximum size of the autoscaling group" 46 | value = try(aws_autoscaling_group.this[0].max_size, "") 47 | } 48 | 49 | output "autoscaling_group_desired_capacity" { 50 | description = "The number of Amazon EC2 instances that should be running in the group" 51 | value = try(aws_autoscaling_group.this[0].desired_capacity, "") 52 | } 53 | 54 | output "autoscaling_group_default_cooldown" { 55 | description = "Time between a scaling activity and the succeeding scaling activity" 56 | value = try(aws_autoscaling_group.this[0].default_cooldown, "") 57 | } 58 | 59 | output "autoscaling_group_health_check_grace_period" { 60 | description = "Time after instance comes into service before checking health" 61 | value = try(aws_autoscaling_group.this[0].health_check_grace_period, "") 62 | } 63 | 64 | output "autoscaling_group_health_check_type" { 65 | description = "EC2 or ELB. 
Controls how health checking is done" 66 | value = try(aws_autoscaling_group.this[0].health_check_type, "") 67 | } 68 | 69 | output "autoscaling_group_availability_zones" { 70 | description = "The availability zones of the autoscaling group" 71 | value = try(aws_autoscaling_group.this[0].availability_zones, "") 72 | } 73 | 74 | output "autoscaling_group_vpc_zone_identifier" { 75 | description = "The VPC zone identifier" 76 | value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, "") 77 | } 78 | 79 | ################################################################################ 80 | # autoscaling group schedule 81 | ################################################################################ 82 | 83 | output "autoscaling_group_schedule_arns" { 84 | description = "ARNs of autoscaling group schedules" 85 | value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } 86 | } 87 | -------------------------------------------------------------------------------- /node_group/self_managed/variables.tf: -------------------------------------------------------------------------------- 1 | #Module : LABEL 2 | #Description : Terraform label module variables. 3 | variable "name" { 4 | type = string 5 | default = "" 6 | description = "Name (e.g. `app` or `cluster`)." 7 | } 8 | 9 | variable "repository" { 10 | type = string 11 | default = "https://github.com/clouddrove/terraform-aws-eks" 12 | description = "Terraform current module repo" 13 | } 14 | 15 | variable "environment" { 16 | type = string 17 | default = "" 18 | description = "Environment (e.g. `prod`, `dev`, `staging`)." 19 | } 20 | 21 | variable "label_order" { 22 | type = list(any) 23 | default = [] 24 | description = "Label order, e.g. `name`,`application`." 25 | } 26 | 27 | variable "managedby" { 28 | type = string 29 | default = "hello@clouddrove.com" 30 | description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'." 31 | } 32 | 33 | variable "attributes" { 34 | type = list(any) 35 | default = [] 36 | description = "Additional attributes (e.g. `1`)." 37 | } 38 | 39 | variable "tags" { 40 | type = map(any) 41 | default = {} 42 | description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 43 | } 44 | 45 | 46 | variable "enabled" { 47 | type = bool 48 | default = true 49 | description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." 50 | } 51 | 52 | 53 | #-----------------------------------------------------------EKS--------------------------------------------------------- 54 | variable "kubernetes_version" { 55 | type = string 56 | default = "" 57 | description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used." 58 | } 59 | 60 | variable "cluster_endpoint" { 61 | type = string 62 | default = "" 63 | description = "Endpoint of associated EKS cluster" 64 | } 65 | 66 | variable "cluster_name" { 67 | type = string 68 | default = "" 69 | description = "The name of the EKS cluster." 70 | } 71 | 72 | variable "cluster_auth_base64" { 73 | description = "Base64 encoded CA of associated EKS cluster" 74 | type = string 75 | default = "" 76 | } 77 | 78 | variable "cluster_service_ipv4_cidr" { 79 | type = string 80 | default = null 81 | description = "The CIDR block to assign Kubernetes service IP addresses from. 
If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" 82 | } 83 | 84 | variable "pre_bootstrap_user_data" { 85 | type = string 86 | default = "" 87 | description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" 88 | } 89 | 90 | variable "post_bootstrap_user_data" { 91 | type = string 92 | default = "" 93 | description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" 94 | } 95 | 96 | variable "bootstrap_extra_args" { 97 | type = string 98 | default = "" 99 | description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" 100 | } 101 | 102 | #-----------------------------------------------------------Launch_Template--------------------------------------------------------- 103 | 104 | variable "ebs_optimized" { 105 | type = bool 106 | default = null 107 | description = "If true, the launched EC2 instance will be EBS-optimized" 108 | } 109 | 110 | variable "instance_type" { 111 | type = string 112 | default = "t3.medium" 113 | description = "The type of the instance to launch" 114 | } 115 | 116 | variable "key_name" { 117 | description = "The key name that should be used for the instance" 118 | type = string 119 | default = null 120 | } 121 | 122 | variable "associate_public_ip_address" { 123 | type = bool 124 | default = false 125 | description = "Associate a public IP address with an instance in a VPC." 126 | } 127 | 128 | variable "security_group_ids" { 129 | type = list(string) 130 | default = [] 131 | description = "A list of associated security group IDs." 132 | } 133 | 134 | variable "disable_api_termination" { 135 | type = bool 136 | default = null 137 | description = "If true, enables EC2 instance termination protection" 138 | } 139 | 140 | variable "instance_initiated_shutdown_behavior" { 141 | type = string 142 | default = null 143 | description = "Shutdown behavior for the instance. Can be `stop` or `terminate`. 
(Default: `stop`)" 144 | } 145 | 146 | variable "kernel_id" { 147 | type = string 148 | default = null 149 | description = "The kernel ID" 150 | } 151 | 152 | variable "ram_disk_id" { 153 | type = string 154 | default = null 155 | description = "The ID of the ram disk" 156 | } 157 | 158 | variable "block_device_mappings" { 159 | type = any 160 | default = {} 161 | description = "Specify volumes to attach to the instance besides the volumes specified by the AMI" 162 | } 163 | 164 | variable "kms_key_id" { 165 | type = string 166 | default = null 167 | description = "The KMS ID of EBS volume" 168 | } 169 | 170 | variable "capacity_reservation_specification" { 171 | type = any 172 | default = null 173 | description = "Targeting for EC2 capacity reservations" 174 | } 175 | 176 | variable "cpu_options" { 177 | type = map(string) 178 | default = null 179 | description = "The CPU options for the instance" 180 | } 181 | 182 | variable "credit_specification" { 183 | type = map(string) 184 | default = null 185 | description = "Customize the credit specification of the instance" 186 | } 187 | 188 | variable "elastic_gpu_specifications" { 189 | type = map(string) 190 | default = null 191 | description = "The elastic GPU to attach to the instance" 192 | } 193 | 194 | variable "elastic_inference_accelerator" { 195 | type = map(string) 196 | default = null 197 | description = "Configuration block containing an Elastic Inference Accelerator to attach to the instance" 198 | } 199 | 200 | variable "enclave_options" { 201 | type = map(string) 202 | default = null 203 | description = "Enable Nitro Enclaves on launched instances" 204 | } 205 | 206 | variable "hibernation_options" { 207 | type = map(string) 208 | default = null 209 | description = "The hibernation options for the instance" 210 | } 211 | 212 | variable "instance_market_options" { 213 | type = any 214 | default = null 215 | description = "The market (purchasing) option for the instance" 216 | } 217 | 218 | variable "license_specifications" { 219 | type = map(string) 220 | default = null 221 | description = "A list of license specifications to associate with" 222 | } 223 | 224 | variable "metadata_options" { 225 | type = map(string) 226 | default = { 227 | http_endpoint = "enabled" 228 | http_tokens = "required" 229 | http_put_response_hop_limit = 2 230 | } 231 | description = "Customize the metadata options for the instance" 232 | 233 | } 234 | 235 | variable "enable_monitoring" { 236 | type = bool 237 | default = true 238 | description = "Enables/disables detailed monitoring" 239 | } 240 | variable "iam_instance_profile_arn" { 241 | type = string 242 | default = null 243 | description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group" 244 | } 245 | 246 | 247 | variable "placement" { 248 | type = map(string) 249 | default = null 250 | description = "The placement of the instance" 251 | } 252 | 253 | #------------------------------------------------Auto-Scaling----------------------------------------------------------- 254 | variable "use_mixed_instances_policy" { 255 | type = bool 256 | default = false 257 | description = "Determines whether to use a mixed instances policy in the autoscaling group or not" 258 | } 259 | 260 | variable "availability_zones" { 261 | type = list(string) 262 | default = null 263 | description = "A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. 
Conflicts with `subnet_ids`" 264 | } 265 | 266 | variable "subnet_ids" { 267 | type = list(string) 268 | default = null 269 | description = "A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`" 270 | } 271 | 272 | variable "min_size" { 273 | description = "The minimum size of the autoscaling group" 274 | type = number 275 | default = 0 276 | } 277 | 278 | variable "max_size" { 279 | description = "The maximum size of the autoscaling group" 280 | type = number 281 | default = 3 282 | } 283 | 284 | variable "desired_size" { 285 | description = "The number of Amazon EC2 instances that should be running in the autoscaling group" 286 | type = number 287 | default = 1 288 | } 289 | 290 | variable "capacity_rebalance" { 291 | description = "Indicates whether capacity rebalance is enabled" 292 | type = bool 293 | default = null 294 | } 295 | 296 | variable "min_elb_capacity" { 297 | description = "Setting this causes Terraform to wait for this number of instances to show up healthy in the ELB only on creation. Updates will not wait on ELB instance number changes" 298 | type = number 299 | default = null 300 | } 301 | 302 | variable "wait_for_elb_capacity" { 303 | description = "Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior." 304 | type = number 305 | default = null 306 | } 307 | 308 | variable "wait_for_capacity_timeout" { 309 | description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior." 310 | type = string 311 | default = null 312 | } 313 | 314 | variable "default_cooldown" { 315 | description = "The amount of time, in seconds, after a scaling activity completes before another scaling activity can start" 316 | type = number 317 | default = null 318 | } 319 | 320 | variable "protect_from_scale_in" { 321 | description = "Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events." 322 | type = bool 323 | default = false 324 | } 325 | 326 | variable "target_group_arns" { 327 | description = "A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing" 328 | type = list(string) 329 | default = [] 330 | } 331 | 332 | variable "placement_group" { 333 | description = "The name of the placement group into which you'll launch your instances, if any" 334 | type = string 335 | default = null 336 | } 337 | 338 | variable "health_check_type" { 339 | description = "`EC2` or `ELB`. Controls how health checking is done" 340 | type = string 341 | default = null 342 | } 343 | 344 | variable "health_check_grace_period" { 345 | description = "Time (in seconds) after instance comes into service before checking health" 346 | type = number 347 | default = null 348 | } 349 | 350 | variable "force_delete" { 351 | description = "Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. 
This bypasses that behavior and potentially leaves resources dangling" 352 | type = bool 353 | default = null 354 | } 355 | 356 | variable "termination_policies" { 357 | description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`" 358 | type = list(string) 359 | default = null 360 | } 361 | 362 | variable "suspended_processes" { 363 | description = "A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly" 364 | type = list(string) 365 | default = null 366 | } 367 | 368 | variable "max_instance_lifetime" { 369 | description = "The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds" 370 | type = number 371 | default = null 372 | } 373 | 374 | variable "enabled_metrics" { 375 | description = "A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`" 376 | type = list(string) 377 | default = null 378 | } 379 | 380 | variable "metrics_granularity" { 381 | description = "The granularity to associate with the metrics to collect. The only valid value is `1Minute`" 382 | type = string 383 | default = null 384 | } 385 | 386 | variable "service_linked_role_arn" { 387 | description = "The ARN of the service-linked role that the ASG will use to call other AWS services" 388 | type = string 389 | default = null 390 | } 391 | variable "initial_lifecycle_hooks" { 392 | description = "One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource" 393 | type = list(map(string)) 394 | default = [] 395 | } 396 | 397 | variable "instance_refresh" { 398 | description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated" 399 | type = any 400 | default = null 401 | } 402 | 403 | variable "mixed_instances_policy" { 404 | description = "Configuration block containing settings to define launch targets for Auto Scaling groups" 405 | type = any 406 | default = null 407 | } 408 | 409 | variable "warm_pool" { 410 | description = "If this block is configured, add a Warm Pool to the specified Auto Scaling group" 411 | type = any 412 | default = null 413 | } 414 | 415 | variable "delete_timeout" { 416 | description = "Delete timeout to wait for destroying autoscaling group" 417 | type = string 418 | default = null 419 | } 420 | 421 | variable "propagate_tags" { 422 | description = "A list of tag blocks. 
Each element should have keys named `key`, `value`, and `propagate_at_launch`" 423 | type = list(map(string)) 424 | default = [] 425 | } 426 | 427 | #-----------------------------------------------TimeOuts---------------------------------------------------------------- 428 | 429 | variable "cluster_delete_timeout" { 430 | description = "Timeout value when deleting the EKS cluster." 431 | type = string 432 | default = "15m" 433 | } 434 | 435 | #---------------------------------------------------ASG-schedule----------------------------------------------------------- 436 | variable "create_schedule" { 437 | description = "Determines whether to create autoscaling group schedule or not" 438 | type = bool 439 | default = true 440 | } 441 | 442 | variable "schedules" { 443 | description = "Map of autoscaling group schedule to create" 444 | type = map(any) 445 | default = {} 446 | } 447 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_arn" { 2 | value = try(aws_eks_cluster.default[0].arn, "") 3 | description = "The Amazon Resource Name (ARN) of the cluster" 4 | } 5 | 6 | output "cluster_certificate_authority_data" { 7 | value = try(aws_eks_cluster.default[0].certificate_authority[0].data, "") 8 | description = "Base64 encoded certificate data required to communicate with the cluster" 9 | } 10 | 11 | output "cluster_endpoint" { 12 | value = try(aws_eks_cluster.default[0].endpoint, "") 13 | description = "Endpoint for your Kubernetes API server" 14 | } 15 | 16 | output "cluster_id" { 17 | value = try(aws_eks_cluster.default[0].id, "") 18 | description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" 19 | } 20 | 21 | output "cluster_oidc_issuer_url" { 22 | value = try(aws_eks_cluster.default[0].identity[0].oidc[0].issuer, "") 23 | description = "The URL on the EKS cluster for the OpenID Connect identity provider" 24 | } 25 | 26 | output "cluster_platform_version" { 27 | value = try(aws_eks_cluster.default[0].platform_version, "") 28 | description = "Platform version for the cluster" 29 | } 30 | 31 | output "cluster_status" { 32 | value = try(aws_eks_cluster.default[0].status, "") 33 | description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" 34 | } 35 | 36 | output "cluster_primary_security_group_id" { 37 | value = try(aws_eks_cluster.default[0].vpc_config[0].cluster_security_group_id, "") 38 | description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use default security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console"
39 | }
40 | 
41 | output "node_security_group_arn" {
42 |   description = "Amazon Resource Name (ARN) of the node shared security group"
43 |   value       = try(aws_security_group.node_group[0].arn, "")
44 | }
45 | 
46 | output "node_security_group_id" {
47 |   value       = try(aws_security_group.node_group[0].id, "")
48 |   description = "ID of the node shared security group"
49 | }
50 | 
51 | output "oidc_provider_arn" {
52 |   value       = try(aws_iam_openid_connect_provider.default[0].arn, "")
53 |   description = "The ARN of the OIDC Provider if `oidc_provider_enabled = true`"
54 | }
55 | 
56 | output "cluster_iam_role_name" {
57 |   value       = try(aws_iam_role.default[0].name, "")
58 |   description = "IAM role name of the EKS cluster"
59 | }
60 | 
61 | output "cluster_iam_role_arn" {
62 |   value       = try(aws_iam_role.default[0].arn, "")
63 |   description = "IAM role ARN of the EKS cluster"
64 | }
65 | 
66 | output "cluster_iam_role_unique_id" {
67 |   value       = try(aws_iam_role.default[0].unique_id, "")
68 |   description = "Stable and unique string identifying the IAM role"
69 | }
70 | 
71 | output "node_group_iam_role_name" {
72 |   value       = try(aws_iam_role.node_groups[0].name, "")
73 |   description = "IAM role name of the EKS node groups"
74 | }
75 | 
76 | output "node_group_iam_role_arn" {
77 |   value       = try(aws_iam_role.node_groups[0].arn, "")
78 |   description = "IAM role ARN of the EKS node groups"
79 | }
80 | 
81 | output "node_group_iam_role_unique_id" {
82 |   value       = try(aws_iam_role.node_groups[0].unique_id, "")
83 |   description = "Stable and unique string identifying the IAM role"
84 | }
85 | 
86 | output "tags" {
87 |   value = module.labels.tags
88 | }
89 | 
90 | output "cluster_name" {
91 |   value = module.labels.id
92 | }
--------------------------------------------------------------------------------
/security_groups.tf:
--------------------------------------------------------------------------------
1 | #Module      : SECURITY GROUP
2 | #Description : Provides a security group resource.
3 | 
4 | resource "aws_security_group" "node_group" {
5 |   count       = var.enabled ? 1 : 0
6 |   name        = "${module.labels.id}-node-group"
7 |   description = "Security Group for EKS node groups"
8 |   vpc_id      = var.vpc_id
9 |   tags        = module.labels.tags
10 | }
11 | 
12 | #Module      : SECURITY GROUP RULE EGRESS
13 | #Description : Provides a security group rule resource. Represents a single egress group rule,
14 | #               which can be added to external Security Groups.
15 | 
16 | #tfsec:ignore:aws-ec2-no-public-egress-sgr ## To allow all outbound traffic from eks nodes.
17 | resource "aws_security_group_rule" "node_group" {
18 |   count             = var.enabled ? 1 : 0
19 |   description       = "Allow all egress traffic"
20 |   from_port         = 0
21 |   to_port           = 0
22 |   protocol          = "-1"
23 |   cidr_blocks       = ["0.0.0.0/0"]
24 |   security_group_id = aws_security_group.node_group[0].id
25 |   type              = "egress"
26 | }
27 | 
28 | #Module      : SECURITY GROUP RULE INGRESS SELF
29 | #Description : Provides a security group rule resource. Represents a single ingress group rule,
30 | #               which can be added to external Security Groups.
31 | resource "aws_security_group_rule" "ingress_self" {
32 |   count                    = var.enabled ? 1 : 0
33 |   description              = "Allow nodes to communicate with each other"
34 |   from_port                = 0
35 |   to_port                  = 65535
36 |   protocol                 = "-1"
37 |   security_group_id        = aws_security_group.node_group[0].id
38 |   source_security_group_id = aws_security_group.node_group[0].id
39 |   type                     = "ingress"
40 | }
41 | 
42 | #Module      : SECURITY GROUP RULE INGRESS
43 | #Description : Provides a security group rule resource.
Represents a single ingress group rule, 44 | # which can be added to external Security Groups. 45 | resource "aws_security_group_rule" "ingress_security_groups_node_group" { 46 | count = var.enabled ? length(var.allowed_security_groups) : 0 47 | description = "Allow inbound traffic from existing Security Groups" 48 | from_port = 0 49 | to_port = 65535 50 | protocol = "-1" 51 | source_security_group_id = element(var.allowed_security_groups, count.index) 52 | security_group_id = aws_security_group.node_group[0].id 53 | type = "ingress" 54 | } 55 | 56 | #Module : SECURITY GROUP RULE CIDR BLOCK 57 | #Description : Provides a security group rule resource. Represents a single ingress group rule, 58 | # which can be added to external Security Groups. 59 | resource "aws_security_group_rule" "ingress_cidr_blocks_node_group" { 60 | count = var.enabled && length(var.allowed_cidr_blocks) > 0 ? 1 : 0 61 | description = "Allow inbound traffic from CIDR blocks" 62 | from_port = 0 63 | to_port = 0 64 | protocol = "-1" 65 | cidr_blocks = var.allowed_cidr_blocks 66 | security_group_id = aws_security_group.node_group[0].id 67 | type = "ingress" 68 | } -------------------------------------------------------------------------------- /self_node_groups.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | metadata_options = { 3 | http_endpoint = "enabled" 4 | http_tokens = "required" 5 | http_put_response_hop_limit = 2 6 | } 7 | } 8 | 9 | ################################################################################ 10 | # Self Managed Node Group 11 | ################################################################################ 12 | 13 | module "self_managed_node_group" { 14 | source = "./node_group/self_managed" 15 | 16 | for_each = { for k, v in var.self_node_groups : k => v if var.enabled } 17 | 18 | enabled = try(each.value.enabled, true) 19 | 20 | cluster_name = aws_eks_cluster.default[0].name 21 | security_group_ids = compact( 22 | concat( 23 | aws_security_group.node_group.*.id, 24 | aws_eks_cluster.default.*.vpc_config.0.cluster_security_group_id 25 | ) 26 | ) 27 | 28 | iam_instance_profile_arn = aws_iam_instance_profile.default[0].arn 29 | 30 | # Autoscaling Group 31 | name = try(each.value.name, each.key) 32 | environment = var.environment 33 | repository = var.repository 34 | 35 | 36 | availability_zones = try(each.value.availability_zones, var.self_node_group_defaults.availability_zones, null) 37 | subnet_ids = try(each.value.subnet_ids, var.self_node_group_defaults.subnet_ids, var.subnet_ids) 38 | key_name = try(each.value.key_name, var.self_node_group_defaults.key_name, null) 39 | 40 | min_size = try(each.value.min_size, var.self_node_group_defaults.min_size, 0) 41 | max_size = try(each.value.max_size, var.self_node_group_defaults.max_size, 3) 42 | desired_size = try(each.value.desired_size, var.self_node_group_defaults.desired_size, 1) 43 | capacity_rebalance = try(each.value.capacity_rebalance, var.self_node_group_defaults.capacity_rebalance, null) 44 | min_elb_capacity = try(each.value.min_elb_capacity, var.self_node_group_defaults.min_elb_capacity, null) 45 | wait_for_elb_capacity = try(each.value.wait_for_elb_capacity, var.self_node_group_defaults.wait_for_elb_capacity, null) 46 | wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_node_group_defaults.wait_for_capacity_timeout, null) 47 | default_cooldown = try(each.value.default_cooldown, var.self_node_group_defaults.default_cooldown, null) 48 | 
protect_from_scale_in     = try(each.value.protect_from_scale_in, var.self_node_group_defaults.protect_from_scale_in, null)
49 | 
50 |   target_group_arns         = try(each.value.target_group_arns, var.self_node_group_defaults.target_group_arns, null)
51 |   placement_group           = try(each.value.placement_group, var.self_node_group_defaults.placement_group, null)
52 |   health_check_type         = try(each.value.health_check_type, var.self_node_group_defaults.health_check_type, null)
53 |   health_check_grace_period = try(each.value.health_check_grace_period, var.self_node_group_defaults.health_check_grace_period, null)
54 | 
55 |   force_delete          = try(each.value.force_delete, var.self_node_group_defaults.force_delete, null)
56 |   termination_policies  = try(each.value.termination_policies, var.self_node_group_defaults.termination_policies, null)
57 |   suspended_processes   = try(each.value.suspended_processes, var.self_node_group_defaults.suspended_processes, null)
58 |   max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_node_group_defaults.max_instance_lifetime, null)
59 | 
60 |   enabled_metrics         = try(each.value.enabled_metrics, var.self_node_group_defaults.enabled_metrics, null)
61 |   metrics_granularity     = try(each.value.metrics_granularity, var.self_node_group_defaults.metrics_granularity, null)
62 |   service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_node_group_defaults.service_linked_role_arn, null)
63 | 
64 |   initial_lifecycle_hooks    = try(each.value.initial_lifecycle_hooks, var.self_node_group_defaults.initial_lifecycle_hooks, [])
65 |   instance_refresh           = try(each.value.instance_refresh, var.self_node_group_defaults.instance_refresh, null)
66 |   use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_node_group_defaults.use_mixed_instances_policy, false)
67 |   mixed_instances_policy     = try(each.value.mixed_instances_policy, var.self_node_group_defaults.mixed_instances_policy, null)
68 |   warm_pool                  = try(each.value.warm_pool, var.self_node_group_defaults.warm_pool, null)
69 | 
70 |   #------------ASG-Schedule--------------------------------------------------
71 |   create_schedule = try(each.value.create_schedule, var.self_node_group_defaults.create_schedule, false)
72 |   schedules       = try(each.value.schedules, var.self_node_group_defaults.schedules, var.schedules)
73 | 
74 |   delete_timeout = try(each.value.delete_timeout, var.self_node_group_defaults.delete_timeout, null)
75 | 
76 |   # User data
77 |   cluster_endpoint         = try(aws_eks_cluster.default[0].endpoint, "")
78 |   cluster_auth_base64      = try(aws_eks_cluster.default[0].certificate_authority[0].data, "")
79 |   pre_bootstrap_user_data  = try(each.value.pre_bootstrap_user_data, var.self_node_group_defaults.pre_bootstrap_user_data, "")
80 |   post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_node_group_defaults.post_bootstrap_user_data, "")
81 |   bootstrap_extra_args     = try(each.value.bootstrap_extra_args, var.self_node_group_defaults.bootstrap_extra_args, "")
82 | 
83 |   # Launch Template
84 | 
85 | 
86 |   ebs_optimized      = try(each.value.ebs_optimized, var.self_node_group_defaults.ebs_optimized, true)
87 |   kubernetes_version = try(each.value.kubernetes_version, var.self_node_group_defaults.cluster_version, var.kubernetes_version)
88 |   instance_type      = try(each.value.instance_type, var.self_node_group_defaults.instance_type, "m6i.large")
89 |   kms_key_id         = try(each.value.kms_key_id, var.self_node_group_defaults.kms_key_id, null)
90 | 
91 |   disable_api_termination = try(each.value.disable_api_termination,
var.self_node_group_defaults.disable_api_termination, null) 92 | instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_node_group_defaults.instance_initiated_shutdown_behavior, null) 93 | kernel_id = try(each.value.kernel_id, var.self_node_group_defaults.kernel_id, null) 94 | ram_disk_id = try(each.value.ram_disk_id, var.self_node_group_defaults.ram_disk_id, null) 95 | 96 | block_device_mappings = try(each.value.block_device_mappings, var.self_node_group_defaults.block_device_mappings, []) 97 | capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_node_group_defaults.capacity_reservation_specification, null) 98 | cpu_options = try(each.value.cpu_options, var.self_node_group_defaults.cpu_options, null) 99 | credit_specification = try(each.value.credit_specification, var.self_node_group_defaults.credit_specification, null) 100 | elastic_gpu_specifications = try(each.value.elastic_gpu_specifications, var.self_node_group_defaults.elastic_gpu_specifications, null) 101 | elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.self_node_group_defaults.elastic_inference_accelerator, null) 102 | enclave_options = try(each.value.enclave_options, var.self_node_group_defaults.enclave_options, null) 103 | hibernation_options = try(each.value.hibernation_options, var.self_node_group_defaults.hibernation_options, null) 104 | instance_market_options = try(each.value.instance_market_options, var.self_node_group_defaults.instance_market_options, null) 105 | license_specifications = try(each.value.license_specifications, var.self_node_group_defaults.license_specifications, null) 106 | metadata_options = try(each.value.metadata_options, var.self_node_group_defaults.metadata_options, local.metadata_options) 107 | enable_monitoring = try(each.value.enable_monitoring, var.self_node_group_defaults.enable_monitoring, false) 108 | # network_interfaces = try(each.value.network_interfaces, var.self_node_group_defaults.network_interfaces, []) 109 | placement = try(each.value.placement, var.self_node_group_defaults.placement, null) 110 | 111 | tags = merge(var.tags, try(each.value.tags, var.self_node_group_defaults.tags, {})) 112 | propagate_tags = try(each.value.propagate_tags, var.self_node_group_defaults.propagate_tags, []) 113 | 114 | } 115 | 116 | 117 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | #Module : LABEL 2 | #Description : Terraform label module variables. 3 | variable "name" { 4 | type = string 5 | default = "" 6 | description = "Name (e.g. `app` or `cluster`)." 7 | } 8 | 9 | variable "repository" { 10 | type = string 11 | default = "https://github.com/clouddrove/terraform-aws-eks" 12 | description = "Terraform current module repo" 13 | } 14 | 15 | variable "environment" { 16 | type = string 17 | default = "" 18 | description = "Environment (e.g. `prod`, `dev`, `staging`)." 19 | } 20 | 21 | variable "label_order" { 22 | type = list(any) 23 | default = ["name", "environment"] 24 | description = "Label order, e.g. `name`,`application`." 25 | } 26 | 27 | variable "managedby" { 28 | type = string 29 | default = "hello@clouddrove.com" 30 | description = "ManagedBy, eg 'CloudDrove' or 'AnmolNagpal'." 31 | } 32 | 33 | variable "attributes" { 34 | type = list(any) 35 | default = [] 36 | description = "Additional attributes (e.g. `1`)." 
37 | } 38 | 39 | variable "tags" { 40 | type = map(any) 41 | default = {} 42 | description = "Additional tags (e.g. map(`BusinessUnit`,`XYZ`)." 43 | } 44 | 45 | variable "eks_tags" { 46 | type = map(any) 47 | default = {} 48 | description = "Additional tags for EKS Cluster only." 49 | } 50 | 51 | variable "enabled" { 52 | type = bool 53 | default = true 54 | description = "Whether to create the resources. Set to `false` to prevent the module from creating any resources." 55 | } 56 | 57 | #---------------------------------------------------------EKS----------------------------------------------------------- 58 | variable "cluster_encryption_config_resources" { 59 | type = list(any) 60 | default = ["secrets"] 61 | description = "Cluster Encryption Config Resources to encrypt, e.g. ['secrets']" 62 | } 63 | 64 | variable "enabled_cluster_log_types" { 65 | type = list(string) 66 | default = ["api", "audit", "authenticator", "controllerManager", "scheduler"] 67 | description = "A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]." 68 | } 69 | 70 | variable "cluster_log_retention_period" { 71 | type = number 72 | default = 30 73 | description = "Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html." 74 | } 75 | 76 | variable "kubernetes_version" { 77 | type = string 78 | default = "" 79 | description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used." 80 | } 81 | 82 | variable "oidc_provider_enabled" { 83 | type = bool 84 | default = true 85 | description = "Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using kiam or kube2iam. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html" 86 | } 87 | variable "eks_additional_security_group_ids" { 88 | type = list(string) 89 | default = [] 90 | description = "EKS additional security group id" 91 | } 92 | variable "nodes_additional_security_group_ids" { 93 | type = list(string) 94 | default = [] 95 | description = "EKS additional node group ids" 96 | } 97 | variable "addons" { 98 | type = any 99 | default = [] 100 | description = "Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources." 101 | } 102 | 103 | variable "cluster_ip_family" { 104 | description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created" 105 | type = string 106 | default = null 107 | } 108 | 109 | variable "cluster_service_ipv4_cidr" { 110 | description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" 111 | type = string 112 | default = null 113 | } 114 | 115 | variable "cluster_service_ipv6_cidr" { 116 | description = "The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. 
Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster" 117 | type = string 118 | default = null 119 | } 120 | 121 | variable "outpost_config" { 122 | description = "Configuration for the AWS Outpost to provision the cluster on" 123 | type = any 124 | default = {} 125 | } 126 | 127 | #-----------------------------------------------------------KMS--------------------------------------------------------- 128 | variable "cluster_encryption_config_enabled" { 129 | type = bool 130 | default = true 131 | description = "Set to `true` to enable Cluster Encryption Configuration" 132 | } 133 | 134 | variable "cluster_encryption_config_kms_key_enable_key_rotation" { 135 | type = bool 136 | default = true 137 | description = "Cluster Encryption Config KMS Key Resource argument - enable kms key rotation" 138 | } 139 | 140 | variable "cluster_encryption_config_kms_key_deletion_window_in_days" { 141 | type = number 142 | default = 10 143 | description = "Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction" 144 | } 145 | 146 | variable "cluster_encryption_config_kms_key_policy" { 147 | type = string 148 | default = null 149 | description = "Cluster Encryption Config KMS Key Resource argument - key policy" 150 | } 151 | 152 | variable "openid_connect_audiences" { 153 | type = list(string) 154 | default = [] 155 | description = "List of OpenID Connect audience client IDs to add to the IRSA provider" 156 | } 157 | 158 | 159 | #---------------------------------------------------------IAM----------------------------------------------------------- 160 | variable "permissions_boundary" { 161 | type = string 162 | default = null 163 | description = "If provided, all IAM roles will be created with this permissions boundary attached." 164 | } 165 | 166 | variable "iam_role_additional_policies" { 167 | description = "Additional policies to be added to the IAM role" 168 | type = map(string) 169 | default = {} 170 | } 171 | 172 | #---------------------------------------------------------Security_Group------------------------------------------------ 173 | variable "allowed_security_groups" { 174 | type = list(string) 175 | default = [] 176 | description = "List of Security Group IDs to be allowed to connect to the EKS cluster." 177 | } 178 | 179 | variable "allowed_cidr_blocks" { 180 | type = list(string) 181 | default = [] 182 | description = "List of CIDR blocks to be allowed to connect to the EKS cluster." 183 | } 184 | 185 | #------------------------------------------------------------Networking------------------------------------------------- 186 | variable "vpc_id" { 187 | type = string 188 | default = "" 189 | description = "VPC ID for the EKS cluster." 190 | } 191 | 192 | variable "subnet_ids" { 193 | type = list(string) 194 | default = [] 195 | description = "A list of subnet IDs to launch the cluster in." 196 | } 197 | 198 | variable "public_access_cidrs" { 199 | type = list(string) 200 | default = ["0.0.0.0/0"] 201 | description = "Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0." 202 | } 203 | 204 | variable "endpoint_private_access" { 205 | type = bool 206 | default = true 207 | description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false." 
208 | } 209 | 210 | variable "endpoint_public_access" { 211 | type = bool 212 | default = true 213 | description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true." 214 | } 215 | 216 | variable "vpc_security_group_ids" { 217 | type = list(string) 218 | default = [] 219 | description = "A list of security group IDs to associate" 220 | } 221 | #-----------------------------------------------TimeOuts---------------------------------------------------------------- 222 | 223 | variable "cluster_timeouts" { 224 | description = "Create, update, and delete timeout configurations for the cluster" 225 | type = map(string) 226 | default = {} 227 | } 228 | 229 | ################################################################################ 230 | # Self Managed Node Group 231 | ################################################################################ 232 | 233 | variable "self_node_groups" { 234 | type = any 235 | default = {} 236 | description = "Map of self-managed node group definitions to create" 237 | } 238 | 239 | variable "self_node_group_defaults" { 240 | type = any 241 | default = {} 242 | description = "Map of self-managed node group default configurations" 243 | } 244 | 245 | # AWS auth 246 | variable "apply_config_map_aws_auth" { 247 | type = bool 248 | default = true 249 | description = "Whether to generate local files from `kubeconfig` and `config_map_aws_auth` and perform `kubectl apply` to apply the ConfigMap to allow the worker nodes to join the EKS cluster." 250 | } 251 | 252 | variable "wait_for_cluster_command" { 253 | type = string 254 | default = "curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz" 255 | description = "`local-exec` command to execute to determine if the EKS cluster is healthy. 
The cluster endpoint is available as the environment variable `ENDPOINT`"
256 | }
257 | 
258 | variable "local_exec_interpreter" {
259 |   type        = list(string)
260 |   default     = ["/bin/sh", "-c"]
261 |   description = "Shell to use for `local_exec`"
262 | }
263 | 
264 | variable "map_additional_iam_roles" {
265 |   type = list(object({
266 |     rolearn  = string
267 |     username = string
268 |     groups   = list(string)
269 |   }))
270 | 
271 |   default     = []
272 |   description = "Additional IAM roles to add to `config-map-aws-auth` ConfigMap"
273 | }
274 | 
275 | variable "map_additional_iam_users" {
276 |   type = list(object({
277 |     userarn  = string
278 |     username = string
279 |     groups   = list(string)
280 |   }))
281 | 
282 |   default     = []
283 |   description = "Additional IAM users to add to `config-map-aws-auth` ConfigMap"
284 | }
285 | 
286 | variable "map_additional_aws_accounts" {
287 |   type        = list(string)
288 |   default     = []
289 |   description = "Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap"
290 | }
291 | 
292 | #Managed
293 | variable "managed_node_group_defaults" {
294 |   type        = any
295 |   default     = {}
296 |   description = "Map of EKS-managed node group default configurations"
297 | }
298 | 
299 | variable "managed_node_group" {
300 |   type        = any
301 |   default     = {}
302 |   description = "Map of EKS-managed node group definitions to create"
303 | }
304 | 
305 | #-----------------------------------------------ASG-Schedule----------------------------------------------------------------
306 | 
307 | variable "create_schedule" {
308 |   description = "Determines whether to create autoscaling group schedule or not"
309 |   type        = bool
310 |   default     = true
311 | }
312 | 
313 | variable "schedules" {
314 |   description = "Map of autoscaling group schedule to create"
315 |   type        = map(any)
316 |   default     = {}
317 | }
318 | 
319 | ##fargate profile
320 | 
321 | variable "fargate_enabled" {
322 |   type        = bool
323 |   default     = false
324 |   description = "Whether Fargate profiles are enabled or not"
325 | }
326 | 
327 | variable "fargate_profiles" {
328 |   type        = map(any)
329 |   default     = {}
330 |   description = "Map of Fargate profile definitions to create"
331 | }
--------------------------------------------------------------------------------
/versions.tf:
--------------------------------------------------------------------------------
1 | # Terraform version
2 | terraform {
3 |   required_version = ">= 1.5.4"
4 | 
5 |   required_providers {
6 |     aws = {
7 |       source  = "hashicorp/aws"
8 |       version = ">= 5.11.0"
9 |     }
10 |   }
11 | }
--------------------------------------------------------------------------------
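
Usage sketch (illustrative only, not part of the repository files above): the block below shows one plausible way to wire the root module's `self_node_groups`, `fargate_enabled`, and `fargate_profiles` inputs declared in variables.tf. The module source address, the IDs, and every literal value are assumptions made for illustration; the maintained, working configurations live under the examples/ directory.

# Illustrative sketch only. The source address and all IDs/values below are
# assumptions, not taken from this repository; see examples/ for maintained
# configurations.
module "eks" {
  source      = "clouddrove/eks/aws" # assumed registry address for this module
  name        = "app"
  environment = "dev"

  # Networking (hypothetical IDs).
  vpc_id     = "vpc-0123456789abcdef0"
  subnet_ids = ["subnet-0aaa1111bbb22222c", "subnet-0ddd3333eee44444f"]

  kubernetes_version = "1.28"

  # One self-managed node group; the keys mirror the arguments forwarded in
  # self_node_groups.tf (name, min_size, max_size, desired_size, instance_type, ...).
  self_node_groups = {
    workers = {
      name          = "workers"
      min_size      = 1
      max_size      = 3
      desired_size  = 2
      instance_type = "t3.medium"
    }
  }

  # Fargate profiles are only created when fargate_enabled is true.
  fargate_enabled  = false
  fargate_profiles = {}
}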