├── .editorconfig
├── .github
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   └── question.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── banner.png
│   ├── mergify.yml
│   ├── renovate.json
│   ├── settings.yml
│   └── workflows
│       ├── branch.yml
│       ├── chatops.yml
│       ├── release.yml
│       └── scheduled.yml
├── .gitignore
├── LICENSE
├── README.md
├── README.yaml
├── atmos.yaml
├── auth.tf
├── context.tf
├── docs
│   ├── migration-0.45.x+.md
│   ├── migration-v1-v2.md
│   └── migration-v3-v4.md
├── examples
│   ├── complete
│   │   ├── context.tf
│   │   ├── fixtures.us-east-2.tfvars
│   │   ├── main.tf
│   │   ├── outputs.tf
│   │   ├── variables.tf
│   │   ├── versions.tf
│   │   └── vpc-cni.tf
│   └── obsolete-version2
│       ├── context.tf
│       ├── fixtures.us-east-2.tfvars
│       ├── main.tf
│       ├── outputs.tf
│       ├── variables.tf
│       └── versions.tf
├── iam.tf
├── main.tf
├── outputs.tf
├── security-group.tf
├── test
│   ├── .gitignore
│   ├── Makefile
│   ├── Makefile.alpine
│   └── src
│       ├── .gitignore
│       ├── Makefile
│       ├── examples_complete_test.go
│       ├── go.mod
│       └── go.sum
├── variables.tf
└── versions.tf
/.editorconfig:
--------------------------------------------------------------------------------
1 | # top-most EditorConfig file
2 | root = true
3 |
4 | # Unix-style newlines with a newline ending every file
5 | [*]
6 | end_of_line = lf
7 | insert_final_newline = true
8 | trim_trailing_whitespace = true
9 |
10 | # Override for Makefile
11 | [{Makefile,makefile,GNUmakefile}]
12 | indent_style = tab
13 | indent_size = 4
14 |
15 | [Makefile.*]
16 | indent_style = tab
17 | indent_size = 4
18 |
19 | [{*.yaml,*.yml,*.md}]
20 | indent_style = space
21 | indent_size = 2
22 |
23 | [*.sh]
24 | indent_style = tab
25 | indent_size = 2
26 |
27 | [*.{tf,tfvars,tpl,variant}]
28 | indent_style = space
29 | indent_size = 2
30 |
31 | [*.json]
32 | insert_final_newline = false
33 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # Use this file to define individuals or teams that are responsible for code in a repository.
2 | # Read more:
3 | #
4 | # Order is important: the last matching pattern has the highest precedence
5 |
6 | # These owners will be the default owners for everything
7 | * @cloudposse/engineering @cloudposse/contributors
8 |
9 | # Cloud Posse must review any changes to Makefiles
10 | **/Makefile @cloudposse/engineering
11 | **/Makefile.* @cloudposse/engineering
12 |
13 | # Cloud Posse must review any changes to GitHub actions
14 | .github/* @cloudposse/engineering
15 |
16 | # Cloud Posse must review any changes to standard context definition,
17 | # but some changes can be rubber-stamped.
18 | **/*.tf @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
19 | README.yaml @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
20 | README.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
21 | docs/*.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
22 |
23 | # Cloud Posse Admins must review all changes to CODEOWNERS or the mergify configuration
24 | .github/mergify.yml @cloudposse/admins
25 | .github/CODEOWNERS @cloudposse/admins
26 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: 'bug'
6 | assignees: ''
7 |
8 | ---
9 |
10 | Found a bug? Maybe our [Slack Community](https://slack.cloudposse.com) can help.
11 |
12 | [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com)
13 |
14 | ## Describe the Bug
15 | A clear and concise description of what the bug is.
16 |
17 | ## Expected Behavior
18 | A clear and concise description of what you expected to happen.
19 |
20 | ## Steps to Reproduce
21 | Steps to reproduce the behavior:
22 | 1. Go to '...'
23 | 2. Run '....'
24 | 3. Enter '....'
25 | 4. See error
26 |
27 | ## Screenshots
28 | If applicable, add screenshots or logs to help explain your problem.
29 |
30 | ## Environment (please complete the following information):
31 |
32 | Anything that will help us triage the bug will help. Here are some ideas:
33 | - OS: [e.g. Linux, OSX, WSL, etc]
34 | - Version [e.g. 10.15]
35 |
36 | ## Additional Context
37 | Add any other context about the problem here.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | description: Create a report to help us improve
4 | labels: ["bug"]
5 | assignees: []
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Found a bug?
11 |
12 | Please checkout our [Slack Community](https://slack.cloudposse.com)
13 | or visit our [Slack Archive](https://archive.sweetops.com/).
14 |
15 | [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com)
16 |
17 | - type: textarea
18 | id: concise-description
19 | attributes:
20 | label: Describe the Bug
21 | description: A clear and concise description of what the bug is.
22 | placeholder: What is the bug about?
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | id: expected
28 | attributes:
29 | label: Expected Behavior
30 |       description: A clear and concise description of what you expected.
31 |       placeholder: What did you expect to happen?
32 | validations:
33 | required: true
34 |
35 | - type: textarea
36 | id: reproduction-steps
37 | attributes:
38 | label: Steps to Reproduce
39 | description: Steps to reproduce the behavior.
40 | placeholder: How do we reproduce it?
41 | validations:
42 | required: true
43 |
44 | - type: textarea
45 | id: screenshots
46 | attributes:
47 | label: Screenshots
48 | description: If applicable, add screenshots or logs to help explain.
49 | validations:
50 | required: false
51 |
52 | - type: textarea
53 | id: environment
54 | attributes:
55 | label: Environment
56 | description: Anything that will help us triage the bug.
57 | placeholder: |
58 | - OS: [e.g. Linux, OSX, WSL, etc]
59 | - Version [e.g. 10.15]
60 | - Module version
61 | - Terraform version
62 | validations:
63 | required: false
64 |
65 | - type: textarea
66 | id: additional
67 | attributes:
68 | label: Additional Context
69 | description: |
70 | Add any other context about the problem here.
71 | validations:
72 | required: false
73 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
3 | contact_links:
4 |
5 | - name: Community Slack Team
6 | url: https://cloudposse.com/slack/
7 | about: |-
8 | Please ask and answer questions here.
9 |
10 | - name: Office Hours
11 | url: https://cloudposse.com/office-hours/
12 | about: |-
13 | Join us every Wednesday for FREE Office Hours (lunch & learn).
14 |
15 | - name: DevOps Accelerator Program
16 | url: https://cloudposse.com/accelerate/
17 | about: |-
18 | Own your infrastructure in record time. We build it. You drive it.
19 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: 'feature request'
6 | assignees: ''
7 |
8 | ---
9 |
10 | Have a question? Please checkout our [Slack Community](https://slack.cloudposse.com) or visit our [Slack Archive](https://archive.sweetops.com/).
11 |
12 | [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com)
13 |
14 | ## Describe the Feature
15 |
16 | A clear and concise description of what the feature is.
17 |
18 | ## Expected Behavior
19 |
20 | A clear and concise description of what you expected to happen.
21 |
22 | ## Use Case
23 |
24 | Is your feature request related to a problem/challenge you are trying to solve? Please provide some additional context of why this feature or capability will be valuable.
25 |
26 | ## Describe Ideal Solution
27 |
28 | A clear and concise description of what you want to happen. If you don't know, that's okay.
29 |
30 | ## Alternatives Considered
31 |
32 | Explain what alternative solutions or features you've considered.
33 |
34 | ## Additional Context
35 |
36 | Add any other context or screenshots about the feature request here.
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | description: Suggest an idea for this project
4 | labels: ["feature request"]
5 | assignees: []
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Have a question?
11 |
12 | Please checkout our [Slack Community](https://slack.cloudposse.com)
13 | or visit our [Slack Archive](https://archive.sweetops.com/).
14 |
15 | [![Slack Community](https://slack.cloudposse.com/badge.svg)](https://slack.cloudposse.com)
16 |
17 | - type: textarea
18 | id: concise-description
19 | attributes:
20 | label: Describe the Feature
21 | description: A clear and concise description of what the feature is.
22 | placeholder: What is the feature about?
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | id: expected
28 | attributes:
29 | label: Expected Behavior
30 |       description: A clear and concise description of what you expect to happen.
31 |       placeholder: What should happen?
32 | validations:
33 | required: true
34 |
35 | - type: textarea
36 | id: use-case
37 | attributes:
38 | label: Use Case
39 | description: |
40 | Is your feature request related to a problem/challenge you are trying
41 | to solve?
42 |
43 | Please provide some additional context of why this feature or
44 | capability will be valuable.
45 | validations:
46 | required: true
47 |
48 | - type: textarea
49 | id: ideal-solution
50 | attributes:
51 | label: Describe Ideal Solution
52 | description: A clear and concise description of what you want to happen.
53 | validations:
54 | required: true
55 |
56 | - type: textarea
57 | id: alternatives-considered
58 | attributes:
59 | label: Alternatives Considered
60 | description: Explain alternative solutions or features considered.
61 | validations:
62 | required: false
63 |
64 | - type: textarea
65 | id: additional
66 | attributes:
67 | label: Additional Context
68 | description: |
69 | Add any other context about the problem here.
70 | validations:
71 | required: false
72 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudposse/terraform-aws-eks-cluster/2ef6f48d707f145868183ae3cea02990e915d4e4/.github/ISSUE_TEMPLATE/question.md
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## what
2 |
3 |
7 |
8 | ## why
9 |
10 |
15 |
16 | ## references
17 |
18 |
22 |
--------------------------------------------------------------------------------
/.github/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudposse/terraform-aws-eks-cluster/2ef6f48d707f145868183ae3cea02990e915d4e4/.github/banner.png
--------------------------------------------------------------------------------
/.github/mergify.yml:
--------------------------------------------------------------------------------
1 | extends: .github
2 |
--------------------------------------------------------------------------------
/.github/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "config:recommended",
4 | ":preserveSemverRanges",
5 | ":rebaseStalePrs"
6 | ],
7 | "baseBranches": ["main"],
8 | "labels": ["auto-update"],
9 | "dependencyDashboardAutoclose": true,
10 | "enabledManagers": ["terraform"],
11 | "terraform": {
12 | "ignorePaths": ["**/context.tf", "**/examples/obsolete*/**"]
13 | },
14 | "timezone": "America/New_York",
15 | "packageRules": [
16 | {
17 | "matchFileNames": [
18 | "/*.tf",
19 | "examples/complete/*.tf"
20 | ],
21 | "groupName": "all",
22 | "schedule": ["every 4 weeks on wednesday at 04:00 am"],
23 | "groupSlug": "monthly"
24 | }
25 | ]
26 | }
27 |
--------------------------------------------------------------------------------
/.github/settings.yml:
--------------------------------------------------------------------------------
1 | # Upstream changes from _extends are only recognized when modifications are made to this file in the default branch.
2 | _extends: .github
3 | repository:
4 | name: terraform-aws-eks-cluster
5 | description: Terraform module for provisioning an EKS cluster
6 | homepage: https://cloudposse.com/accelerate
7 | topics: terraform, terraform-module, eks, aws, masters, kubernetes, k8s, hcl2, eks-cluster, eks-workers, fargate
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.github/workflows/branch.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Branch
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | - release/**
8 | types: [opened, synchronize, reopened, labeled, unlabeled]
9 | push:
10 | branches:
11 | - main
12 | - release/v*
13 | paths-ignore:
14 | - '.github/**'
15 | - 'docs/**'
16 | - 'examples/**'
17 | - 'test/**'
18 | - 'README.md'
19 |
20 | permissions: {}
21 |
22 | jobs:
23 | terraform-module:
24 | uses: cloudposse/.github/.github/workflows/shared-terraform-module.yml@main
25 | secrets: inherit
26 |
--------------------------------------------------------------------------------
/.github/workflows/chatops.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: chatops
3 | on:
4 | issue_comment:
5 | types: [created]
6 |
7 | permissions:
8 | pull-requests: write
9 | id-token: write
10 | contents: write
11 | statuses: write
12 |
13 | jobs:
14 | test:
15 | uses: cloudposse/.github/.github/workflows/shared-terraform-chatops.yml@main
16 | if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, '/terratest') }}
17 | secrets: inherit
18 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: release
3 | on:
4 | release:
5 | types:
6 | - published
7 |
8 | permissions:
9 | id-token: write
10 | contents: write
11 | pull-requests: write
12 |
13 | jobs:
14 | terraform-module:
15 | uses: cloudposse/.github/.github/workflows/shared-release-branches.yml@main
16 | secrets: inherit
17 |
--------------------------------------------------------------------------------
/.github/workflows/scheduled.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: scheduled
3 | on:
4 |   workflow_dispatch: { } # Allows manually triggering this workflow
5 | schedule:
6 | - cron: "0 3 * * *"
7 |
8 | permissions:
9 | pull-requests: write
10 | id-token: write
11 | contents: write
12 |
13 | jobs:
14 | scheduled:
15 | uses: cloudposse/.github/.github/workflows/shared-terraform-scheduled.yml@main
16 | secrets: inherit
17 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | **/.idea
9 | **/*.iml
10 |
11 | **/.build-harness
12 | **/build-harness
13 |
14 | **/pkg
15 |
16 | # Rendered yaml config
17 | **/configmap-auth.yaml
18 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018-2024 Cloud Posse, LLC
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.yaml:
--------------------------------------------------------------------------------
1 | name: terraform-aws-eks-cluster
2 |
3 | license: APACHE2
4 |
5 | github_repo: cloudposse/terraform-aws-eks-cluster
6 |
7 | badges:
8 | - name: Latest Release
9 | image: https://img.shields.io/github/release/cloudposse/terraform-aws-eks-cluster.svg?style=for-the-badge
10 | url: https://github.com/cloudposse/terraform-aws-eks-cluster/releases/latest
11 | - name: Last Updated
12 | image: https://img.shields.io/github/last-commit/cloudposse/terraform-aws-eks-cluster.svg?style=for-the-badge
13 | url: https://github.com/cloudposse/terraform-aws-eks-cluster/commits
14 | - name: Slack Community
15 | image: https://slack.cloudposse.com/for-the-badge.svg
16 | url: https://cloudposse.com/slack
17 |
18 | # List any related terraform modules that this module may be used with or that this module depends on.
19 | related:
20 | - name: terraform-aws-components eks/clusters
21 | description: Cloud Posse's component (root module) using this module to provision an EKS cluster
22 | url: https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/cluster
23 | - name: terraform-aws-components eks/karpenter and eks/karpenter-provisioner
24 | description: Cloud Posse's components (root modules) deploying Karpenter to manage auto-scaling of EKS node groups
25 | url: https://github.com/cloudposse/terraform-aws-components/tree/main/modules/eks/karpenter
26 | - name: terraform-aws-eks-workers
27 | description: Terraform module to provision an AWS AutoScaling Group, IAM Role, and Security Group for EKS Workers
28 | url: https://github.com/cloudposse/terraform-aws-eks-workers
29 | - name: terraform-aws-ec2-autoscale-group
30 | description: Terraform module to provision Auto Scaling Group and Launch Template on AWS
31 | url: https://github.com/cloudposse/terraform-aws-ec2-autoscale-group
32 | - name: terraform-aws-ecs-container-definition
33 | description: Terraform module to generate well-formed JSON documents (container definitions) that are passed to the aws_ecs_task_definition Terraform resource
34 | url: https://github.com/cloudposse/terraform-aws-ecs-container-definition
35 | - name: terraform-aws-ecs-alb-service-task
36 | description: Terraform module which implements an ECS service which exposes a web service via ALB
37 | url: https://github.com/cloudposse/terraform-aws-ecs-alb-service-task
38 | - name: terraform-aws-ecs-web-app
39 | description: Terraform module that implements a web app on ECS and supports autoscaling, CI/CD, monitoring, ALB integration, and much more
40 | url: https://github.com/cloudposse/terraform-aws-ecs-web-app
41 | - name: terraform-aws-ecs-codepipeline
42 | description: Terraform module for CI/CD with AWS Code Pipeline and Code Build for ECS
43 | url: https://github.com/cloudposse/terraform-aws-ecs-codepipeline
44 | - name: terraform-aws-ecs-cloudwatch-autoscaling
45 | description: Terraform module to autoscale ECS Service based on CloudWatch metrics
46 | url: https://github.com/cloudposse/terraform-aws-ecs-cloudwatch-autoscaling
47 | - name: terraform-aws-ecs-cloudwatch-sns-alarms
48 | description: Terraform module to create CloudWatch Alarms on ECS Service level metrics
49 | url: https://github.com/cloudposse/terraform-aws-ecs-cloudwatch-sns-alarms
50 | - name: terraform-aws-ec2-instance
51 | description: Terraform module for providing a general purpose EC2 instance
52 | url: https://github.com/cloudposse/terraform-aws-ec2-instance
53 | - name: terraform-aws-ec2-instance-group
54 | description: Terraform module for provisioning multiple general purpose EC2 hosts for stateful applications
55 | url: https://github.com/cloudposse/terraform-aws-ec2-instance-group
56 |
57 | description: |-
58 | Terraform module to provision an [EKS](https://aws.amazon.com/eks/) cluster on AWS.
59 |
60 | This Terraform module provisions a fully configured AWS [EKS](https://aws.amazon.com/eks/) (Elastic Kubernetes Service) cluster.
61 | It's engineered to integrate smoothly with [Karpenter](https://karpenter.sh/) and [EKS addons](https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html),
62 | forming a critical part of [Cloud Posse's reference architecture](https://cloudposse.com/reference-architecture).
63 | Ideal for teams looking to deploy scalable and manageable Kubernetes clusters on AWS with minimal fuss.
64 |
65 | introduction: |-
66 | The module provisions the following resources:
67 |
68 | - EKS cluster of master nodes that can be used together with the
69 | [terraform-aws-eks-node-group](https://github.com/cloudposse/terraform-aws-eks-node-group) and
70 | [terraform-aws-eks-fargate-profile](https://github.com/cloudposse/terraform-aws-eks-fargate-profile)
71 | modules to create a full-blown EKS/Kubernetes cluster. You can also use the [terraform-aws-eks-workers](https://github.com/cloudposse/terraform-aws-eks-workers)
72 | module to provision worker nodes for the cluster, but it is now rare for that to be a better choice than to use `terraform-aws-eks-node-group`.
73 | - IAM Role to allow the cluster to access other AWS services
74 | - EKS access entries to allow IAM users to access and administer the cluster
75 |
76 | usage: |-
77 | For a complete example, see [examples/complete](examples/complete).
78 |
79 | For automated tests of the complete example using [bats](https://github.com/bats-core/bats-core) and [Terratest](https://github.com/gruntwork-io/terratest) (which tests and deploys the example on AWS), see [test/src](test/src).
80 |
81 | Other examples:
82 |
83 | - [terraform-aws-components/eks/cluster](https://github.com/cloudposse/terraform-aws-components/tree/master/modules/eks/cluster) - Cloud Posse's service catalog of "root module" invocations for provisioning reference architectures
84 |
85 | ```hcl
86 | provider "aws" {
87 | region = var.region
88 | }
89 |
90 | # Note: This example creates an explicit access entry for the current user,
91 | # but in practice, you should use a static map of IAM users or roles that should have access to the cluster.
92 | # Granting access to the current user in this way is not recommended for production use.
93 | data "aws_caller_identity" "current" {}
94 |
95 | # IAM session context converts an assumed role ARN into an IAM Role ARN.
96 | # Again, this is primarily to simplify the example, and in practice, you should use a static map of IAM users or roles.
97 | data "aws_iam_session_context" "current" {
98 | arn = data.aws_caller_identity.current.arn
99 | }
100 |
101 | locals {
102 | # The usage of the specific kubernetes.io/cluster/* resource tags below are required
103 | # for EKS and Kubernetes to discover and manage networking resources
104 | # https://aws.amazon.com/premiumsupport/knowledge-center/eks-vpc-subnet-discovery/
105 | # https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/deploy/subnet_discovery.md
106 | tags = { "kubernetes.io/cluster/${module.label.id}" = "shared" }
107 |
108 | # required tags to make ALB ingress work https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
109 | public_subnets_additional_tags = {
110 | "kubernetes.io/role/elb" : 1
111 | }
112 | private_subnets_additional_tags = {
113 | "kubernetes.io/role/internal-elb" : 1
114 | }
115 |
116 | # Enable the IAM user creating the cluster to administer it,
117 | # without using the bootstrap_cluster_creator_admin_permissions option,
118 | # as an example of how to use the access_entry_map feature.
119 | # In practice, this should be replaced with a static map of IAM users or roles
120 | # that should have access to the cluster, but we use the current user
121 | # to simplify the example.
122 | access_entry_map = {
123 | (data.aws_iam_session_context.current.issuer_arn) = {
124 | access_policy_associations = {
125 | ClusterAdmin = {}
126 | }
127 | }
128 | }
129 | }
130 |
131 | module "label" {
132 | source = "cloudposse/label/null"
133 | # Cloud Posse recommends pinning every module to a specific version
134 | # version = "x.x.x"
135 |
136 | namespace = var.namespace
137 | name = var.name
138 | stage = var.stage
139 | delimiter = var.delimiter
140 | tags = var.tags
141 | }
142 |
143 | module "vpc" {
144 | source = "cloudposse/vpc/aws"
145 | # Cloud Posse recommends pinning every module to a specific version
146 | # version = "x.x.x"
147 |
148 | ipv4_primary_cidr_block = "172.16.0.0/16"
149 |
150 | tags = local.tags
151 | context = module.label.context
152 | }
153 |
154 | module "subnets" {
155 | source = "cloudposse/dynamic-subnets/aws"
156 | # Cloud Posse recommends pinning every module to a specific version
157 | # version = "x.x.x"
158 |
159 | availability_zones = var.availability_zones
160 | vpc_id = module.vpc.vpc_id
161 | igw_id = [module.vpc.igw_id]
162 | ipv4_cidr_block = [module.vpc.vpc_cidr_block]
163 | nat_gateway_enabled = true
164 | nat_instance_enabled = false
165 |
166 | public_subnets_additional_tags = local.public_subnets_additional_tags
167 | private_subnets_additional_tags = local.private_subnets_additional_tags
168 |
169 | tags = local.tags
170 | context = module.label.context
171 | }
172 |
173 | module "eks_node_group" {
174 | source = "cloudposse/eks-node-group/aws"
175 | # Cloud Posse recommends pinning every module to a specific version
176 | # version = "x.x.x"
177 |
178 | instance_types = [var.instance_type]
179 | subnet_ids = module.subnets.private_subnet_ids
180 | health_check_type = var.health_check_type
181 | min_size = var.min_size
182 | max_size = var.max_size
183 | cluster_name = module.eks_cluster.eks_cluster_id
184 |
185 | # Enable the Kubernetes cluster auto-scaler to find the auto-scaling group
186 | cluster_autoscaler_enabled = var.autoscaling_policies_enabled
187 |
188 | context = module.label.context
189 | }
190 |
191 | module "eks_cluster" {
192 | source = "cloudposse/eks-cluster/aws"
193 | # Cloud Posse recommends pinning every module to a specific version
194 | # version = "x.x.x"
195 |
196 | subnet_ids = concat(module.subnets.private_subnet_ids, module.subnets.public_subnet_ids)
197 | kubernetes_version = var.kubernetes_version
198 | oidc_provider_enabled = true
199 |
200 | addons = [
201 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html#vpc-cni-latest-available-version
202 | {
203 | addon_name = "vpc-cni"
204 | addon_version = var.vpc_cni_version
205 | resolve_conflicts_on_create = "OVERWRITE"
206 | resolve_conflicts_on_update = "OVERWRITE"
207 | service_account_role_arn = var.vpc_cni_service_account_role_arn # Creating this role is outside the scope of this example
208 | },
209 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html
210 | {
211 | addon_name = "kube-proxy"
212 | addon_version = var.kube_proxy_version
213 | resolve_conflicts_on_create = "OVERWRITE"
214 | resolve_conflicts_on_update = "OVERWRITE"
215 | service_account_role_arn = null
216 | },
217 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html
218 | {
219 | addon_name = "coredns"
220 | addon_version = var.coredns_version
221 | resolve_conflicts_on_create = "OVERWRITE"
222 | resolve_conflicts_on_update = "OVERWRITE"
223 | service_account_role_arn = null
224 | },
225 | ]
226 | addons_depends_on = [module.eks_node_group]
227 |
228 | context = module.label.context
229 |
230 | cluster_depends_on = [module.subnets]
231 | }
232 | ```
233 |
234 | Module usage with two unmanaged worker groups:
235 |
236 | ```hcl
237 | locals {
238 | # Unfortunately, the `aws_ami` data source attribute `most_recent` (https://github.com/cloudposse/terraform-aws-eks-workers/blob/34a43c25624a6efb3ba5d2770a601d7cb3c0d391/main.tf#L141)
239 | # does not work as you might expect. If you are not going to use a custom AMI you should
240 | # use the `eks_worker_ami_name_filter` variable to set the right kubernetes version for EKS workers,
241 | # otherwise the first version of Kubernetes supported by AWS (v1.11) for EKS workers will be selected, but the
242 | # EKS control plane will ignore it to use one that matches the version specified by the `kubernetes_version` variable.
243 | eks_worker_ami_name_filter = "amazon-eks-node-${var.kubernetes_version}*"
244 | }
245 |
246 | module "eks_workers" {
247 | source = "cloudposse/eks-workers/aws"
248 | # Cloud Posse recommends pinning every module to a specific version
249 | # version = "x.x.x"
250 |
251 | attributes = ["small"]
252 | instance_type = "t3.small"
253 | eks_worker_ami_name_filter = local.eks_worker_ami_name_filter
254 | vpc_id = module.vpc.vpc_id
255 | subnet_ids = module.subnets.public_subnet_ids
256 | health_check_type = var.health_check_type
257 | min_size = var.min_size
258 | max_size = var.max_size
259 | wait_for_capacity_timeout = var.wait_for_capacity_timeout
260 | cluster_name = module.label.id
261 | cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
262 | cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
263 | cluster_security_group_id = module.eks_cluster.eks_cluster_managed_security_group_id
264 |
265 | # Auto-scaling policies and CloudWatch metric alarms
266 | autoscaling_policies_enabled = var.autoscaling_policies_enabled
267 | cpu_utilization_high_threshold_percent = var.cpu_utilization_high_threshold_percent
268 | cpu_utilization_low_threshold_percent = var.cpu_utilization_low_threshold_percent
269 |
270 | context = module.label.context
271 | }
272 |
273 | module "eks_workers_2" {
274 | source = "cloudposse/eks-workers/aws"
275 | # Cloud Posse recommends pinning every module to a specific version
276 | # version = "x.x.x"
277 |
278 | attributes = ["medium"]
279 | instance_type = "t3.medium"
280 | eks_worker_ami_name_filter = local.eks_worker_ami_name_filter
281 | vpc_id = module.vpc.vpc_id
282 | subnet_ids = module.subnets.public_subnet_ids
283 | health_check_type = var.health_check_type
284 | min_size = var.min_size
285 | max_size = var.max_size
286 | wait_for_capacity_timeout = var.wait_for_capacity_timeout
287 | cluster_name = module.label.id
288 | cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
289 | cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
290 | cluster_security_group_id = module.eks_cluster.eks_cluster_managed_security_group_id
291 |
292 | # Auto-scaling policies and CloudWatch metric alarms
293 | autoscaling_policies_enabled = var.autoscaling_policies_enabled
294 | cpu_utilization_high_threshold_percent = var.cpu_utilization_high_threshold_percent
295 | cpu_utilization_low_threshold_percent = var.cpu_utilization_low_threshold_percent
296 |
297 | context = module.label.context
298 | }
299 |
300 | module "eks_cluster" {
301 | source = "cloudposse/eks-cluster/aws"
302 | # Cloud Posse recommends pinning every module to a specific version
303 | # version = "x.x.x"
304 |
305 | subnet_ids = concat(module.subnets.private_subnet_ids, module.subnets.public_subnet_ids)
306 | kubernetes_version = var.kubernetes_version
307 | oidc_provider_enabled = true # needed for VPC CNI
308 |
309 | access_entries_for_nodes = {
310 | EC2_LINUX = [module.eks_workers.workers_role_arn, module.eks_workers_2.workers_role_arn]
311 | }
312 |
313 | context = module.label.context
314 | }
315 | ```
316 |
317 | > [!WARNING]
318 | > Release `4.0.0` contains major breaking changes that will require you to update your existing EKS cluster
319 | > and configuration to use this module. Please see the [v3 to v4 migration path](./docs/migration-v3-v4.md) for more information.
320 | > Release `2.0.0` (previously released as version `0.45.0`) contains some changes that,
321 | > if applied to a cluster created with an earlier version of this module,
322 | > could result in your existing EKS cluster being replaced (destroyed and recreated).
323 | > To prevent this, follow the instructions in the [v1 to v2 migration path](./docs/migration-v1-v2.md).
324 |
325 | > [!NOTE]
326 | > Prior to v4 of this module, AWS did not provide an API to manage access to the EKS cluster,
327 | > causing numerous challenges. With v4 of this module, it exclusively uses the AWS API, resolving
 327 | > many issues you may read about that had affected prior versions. See the version 2 README and release notes
 328 | > for more information on the challenges and workarounds that were required prior to v4.
330 |
331 | include: []
332 | contributors: []
333 |
--------------------------------------------------------------------------------
/atmos.yaml:
--------------------------------------------------------------------------------
1 | # Atmos Configuration — powered by https://atmos.tools
2 | #
3 | # This configuration enables centralized, DRY, and consistent project scaffolding using Atmos.
4 | #
5 | # Included features:
6 | # - Organizational custom commands: https://atmos.tools/core-concepts/custom-commands
7 | # - Automated README generation: https://atmos.tools/cli/commands/docs/generate
8 | #
9 |
10 | # Import shared configuration used by all modules
11 | import:
12 | - https://raw.githubusercontent.com/cloudposse/.github/refs/heads/main/.github/atmos/terraform-module.yaml
13 |
--------------------------------------------------------------------------------
/auth.tf:
--------------------------------------------------------------------------------
1 |
locals {
  # Extract the cluster certificate for use in OIDC configuration
  certificate_authority_data = try(aws_eks_cluster.default[0].certificate_authority[0]["data"], "")

  # Short abbreviations (e.g. "Admin") for the AWS-managed EKS cluster access policy ARNs
  eks_policy_short_abbreviation_map = {
    # List available policies with `aws eks list-access-policies --output table`

    Admin        = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy"
    ClusterAdmin = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
    Edit         = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
    View         = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
    # Add new policies here
  }

  # Accept both the short form (e.g. "Admin") and the full policy name
  # (e.g. "AmazonEKSAdminPolicy") as abbreviations for the full policy ARN
  eks_policy_abbreviation_map = merge({ for k, v in local.eks_policy_short_abbreviation_map : format("AmazonEKS%sPolicy", k) => v },
    local.eks_policy_short_abbreviation_map)


  # Expand abbreviated access policies to full ARNs
  access_entry_expanded_map = { for k, v in var.access_entry_map : k => merge({
    # Expand abbreviated policies to full ARNs; unrecognized keys are passed through unchanged
    access_policy_associations = { for kk, vv in v.access_policy_associations : try(local.eks_policy_abbreviation_map[kk], kk) => vv }
    # Copy over all other fields
  }, { for kk, vv in v : kk => vv if kk != "access_policy_associations" })
  }

  # Replace membership in "system:masters" group with association to "ClusterAdmin" policy
  access_entry_map = { for k, v in local.access_entry_expanded_map : k => merge({
    # Remove "system:masters" group from standard users (non-STANDARD types keep their groups as-is)
    kubernetes_groups = [for group in v.kubernetes_groups : group if group != "system:masters" || v.type != "STANDARD"]
    access_policy_associations = merge(
      # copy all existing associations
      v.access_policy_associations,
      # add "ClusterAdmin" policy if the user was in "system:masters" group and is a standard user
      contains(v.kubernetes_groups, "system:masters") && v.type == "STANDARD" ? {
        "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" = {
          access_scope = {
            type       = "cluster"
            namespaces = null
          }
        }
      } : {}
    )
    # Copy over all other fields
  }, { for kk, vv in v : kk => vv if kk != "kubernetes_groups" && kk != "access_policy_associations" })
  }

  # Flatten access_entry_map into a single map of principal/policy pairs,
  # keyed "<principal ARN>-<policy ARN>", suitable for use with `for_each`
  # in aws_eks_access_policy_association.map below
  eks_access_policy_association_product_map = merge(flatten([
    for k, v in local.access_entry_map : [for kk, vv in v.access_policy_associations : { format("%s-%s", k, kk) = {
      principal_arn = k
      policy_arn    = kk
    }
    }]
  ])...)
}
57 |
# The preferred way to keep track of entries is by key, but we also support list,
# because keys need to be known at plan time, but list values do not.
# One access entry per principal; the map key is the principal (IAM) ARN.
resource "aws_eks_access_entry" "map" {
  for_each = local.enabled ? local.access_entry_map : {}

  cluster_name      = local.eks_cluster_id
  principal_arn     = each.key
  kubernetes_groups = each.value.kubernetes_groups
  type              = each.value.type

  tags = module.this.tags
}
70 |
# Associate each principal in access_entry_map with each of its access policies.
# Keys of the product map are "<principal ARN>-<policy ARN>".
resource "aws_eks_access_policy_association" "map" {
  for_each = local.enabled ? local.eks_access_policy_association_product_map : {}

  cluster_name  = local.eks_cluster_id
  principal_arn = each.value.principal_arn
  policy_arn    = each.value.policy_arn

  # Look the access scope back up in access_entry_map via the principal
  # and policy ARNs carried in the product map value
  access_scope {
    type       = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.type
    namespaces = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.namespaces
  }

  # An access entry for the principal must exist before a policy can be associated with it
  depends_on = [
    aws_eks_access_entry.map,
    aws_eks_access_entry.standard,
    aws_eks_access_entry.linux,
    aws_eks_access_entry.windows,
  ]
}
90 |
# We could combine all the list access entries into a single resource,
# but separating them by category minimizes the ripple effect of changes
# due to adding and removing items from the list.
# STANDARD-type access entries supplied as a list (for principal ARNs
# that are not known at plan time and so cannot be map keys).
resource "aws_eks_access_entry" "standard" {
  count = local.enabled ? length(var.access_entries) : 0

  cluster_name      = local.eks_cluster_id
  principal_arn     = var.access_entries[count.index].principal_arn
  kubernetes_groups = var.access_entries[count.index].kubernetes_groups
  type              = "STANDARD"

  tags = module.this.tags
}
104 |
# Access entries for EC2 Linux node IAM roles.
# Note that kubernetes_groups is intentionally not set for node-type entries.
resource "aws_eks_access_entry" "linux" {
  count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_LINUX", [])) : 0

  cluster_name  = local.eks_cluster_id
  principal_arn = var.access_entries_for_nodes.EC2_LINUX[count.index]
  type          = "EC2_LINUX"

  tags = module.this.tags
}
114 |
# Access entries for EC2 Windows node IAM roles.
# Note that kubernetes_groups is intentionally not set for node-type entries.
resource "aws_eks_access_entry" "windows" {
  count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_WINDOWS", [])) : 0

  cluster_name  = local.eks_cluster_id
  principal_arn = var.access_entries_for_nodes.EC2_WINDOWS[count.index]
  type          = "EC2_WINDOWS"

  tags = module.this.tags
}
124 |
# Policy associations supplied as a list (for principal ARNs not known at plan time).
resource "aws_eks_access_policy_association" "list" {
  count = local.enabled ? length(var.access_policy_associations) : 0

  cluster_name  = local.eks_cluster_id
  principal_arn = var.access_policy_associations[count.index].principal_arn
  # Accept abbreviated policy names (e.g. "Admin" or "AmazonEKSAdminPolicy")
  # as well as full policy ARNs, which are passed through unchanged
  policy_arn = try(local.eks_policy_abbreviation_map[var.access_policy_associations[count.index].policy_arn],
  var.access_policy_associations[count.index].policy_arn)

  access_scope {
    type       = var.access_policy_associations[count.index].access_scope.type
    namespaces = var.access_policy_associations[count.index].access_scope.namespaces
  }

  # An access entry for the principal must exist before a policy can be associated with it
  depends_on = [
    aws_eks_access_entry.map,
    aws_eks_access_entry.standard,
    aws_eks_access_entry.linux,
    aws_eks_access_entry.windows,
  ]
}
145 |
--------------------------------------------------------------------------------
/context.tf:
--------------------------------------------------------------------------------
1 | #
2 | # ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label
3 | # All other instances of this file should be a copy of that one
4 | #
5 | #
6 | # Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf
7 | # and then place it in your Terraform module to automatically get
8 | # Cloud Posse's standard configuration inputs suitable for passing
9 | # to Cloud Posse modules.
10 | #
11 | # curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf
12 | #
13 | # Modules should access the whole context as `module.this.context`
14 | # to get the input variables with nulls for defaults,
15 | # for example `context = module.this.context`,
16 | # and access individual variables as `module.this.`,
17 | # with final values filled in.
18 | #
19 | # For example, when using defaults, `module.this.context.delimiter`
20 | # will be null, and `module.this.delimiter` will be `-` (hyphen).
21 | #
22 |
23 | module "this" {
24 | source = "cloudposse/label/null"
25 | version = "0.25.0" # requires Terraform >= 0.13.0
26 |
27 | enabled = var.enabled
28 | namespace = var.namespace
29 | tenant = var.tenant
30 | environment = var.environment
31 | stage = var.stage
32 | name = var.name
33 | delimiter = var.delimiter
34 | attributes = var.attributes
35 | tags = var.tags
36 | additional_tag_map = var.additional_tag_map
37 | label_order = var.label_order
38 | regex_replace_chars = var.regex_replace_chars
39 | id_length_limit = var.id_length_limit
40 | label_key_case = var.label_key_case
41 | label_value_case = var.label_value_case
42 | descriptor_formats = var.descriptor_formats
43 | labels_as_tags = var.labels_as_tags
44 |
45 | context = var.context
46 | }
47 |
48 | # Copy contents of cloudposse/terraform-null-label/variables.tf here
49 |
50 | variable "context" {
51 | type = any
52 | default = {
53 | enabled = true
54 | namespace = null
55 | tenant = null
56 | environment = null
57 | stage = null
58 | name = null
59 | delimiter = null
60 | attributes = []
61 | tags = {}
62 | additional_tag_map = {}
63 | regex_replace_chars = null
64 | label_order = []
65 | id_length_limit = null
66 | label_key_case = null
67 | label_value_case = null
68 | descriptor_formats = {}
69 | # Note: we have to use [] instead of null for unset lists due to
70 | # https://github.com/hashicorp/terraform/issues/28137
71 | # which was not fixed until Terraform 1.0.0,
72 | # but we want the default to be all the labels in `label_order`
73 | # and we want users to be able to prevent all tag generation
74 | # by setting `labels_as_tags` to `[]`, so we need
75 | # a different sentinel to indicate "default"
76 | labels_as_tags = ["unset"]
77 | }
78 | description = <<-EOT
79 | Single object for setting entire context at once.
80 | See description of individual variables for details.
81 | Leave string and numeric variables as `null` to use default value.
82 | Individual variable settings (non-null) override settings in context object,
83 | except for attributes, tags, and additional_tag_map, which are merged.
84 | EOT
85 |
86 | validation {
87 | condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
88 | error_message = "Allowed values: `lower`, `title`, `upper`."
89 | }
90 |
91 | validation {
92 | condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
93 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
94 | }
95 | }
96 |
97 | variable "enabled" {
98 | type = bool
99 | default = null
100 | description = "Set to false to prevent the module from creating any resources"
101 | }
102 |
103 | variable "namespace" {
104 | type = string
105 | default = null
106 | description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique"
107 | }
108 |
109 | variable "tenant" {
110 | type = string
111 | default = null
112 | description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for"
113 | }
114 |
115 | variable "environment" {
116 | type = string
117 | default = null
118 | description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'"
119 | }
120 |
121 | variable "stage" {
122 | type = string
123 | default = null
124 | description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'"
125 | }
126 |
127 | variable "name" {
128 | type = string
129 | default = null
130 | description = <<-EOT
131 | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
132 | This is the only ID element not also included as a `tag`.
133 | The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
134 | EOT
135 | }
136 |
137 | variable "delimiter" {
138 | type = string
139 | default = null
140 | description = <<-EOT
141 | Delimiter to be used between ID elements.
142 | Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
143 | EOT
144 | }
145 |
146 | variable "attributes" {
147 | type = list(string)
148 | default = []
149 | description = <<-EOT
150 | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
151 | in the order they appear in the list. New attributes are appended to the
152 | end of the list. The elements of the list are joined by the `delimiter`
153 | and treated as a single ID element.
154 | EOT
155 | }
156 |
157 | variable "labels_as_tags" {
158 | type = set(string)
159 | default = ["default"]
160 | description = <<-EOT
161 | Set of labels (ID elements) to include as tags in the `tags` output.
162 | Default is to include all labels.
163 | Tags with empty values will not be included in the `tags` output.
164 | Set to `[]` to suppress all generated tags.
165 | **Notes:**
166 | The value of the `name` tag, if included, will be the `id`, not the `name`.
167 | Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
168 | changed in later chained modules. Attempts to change it will be silently ignored.
169 | EOT
170 | }
171 |
172 | variable "tags" {
173 | type = map(string)
174 | default = {}
175 | description = <<-EOT
176 | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
177 | Neither the tag keys nor the tag values will be modified by this module.
178 | EOT
179 | }
180 |
181 | variable "additional_tag_map" {
182 | type = map(string)
183 | default = {}
184 | description = <<-EOT
185 | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
186 | This is for some rare cases where resources want additional configuration of tags
187 | and therefore take a list of maps with tag key, value, and additional configuration.
188 | EOT
189 | }
190 |
191 | variable "label_order" {
192 | type = list(string)
193 | default = null
194 | description = <<-EOT
195 | The order in which the labels (ID elements) appear in the `id`.
196 | Defaults to ["namespace", "environment", "stage", "name", "attributes"].
197 | You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present.
198 | EOT
199 | }
200 |
201 | variable "regex_replace_chars" {
202 | type = string
203 | default = null
204 | description = <<-EOT
205 | Terraform regular expression (regex) string.
206 | Characters matching the regex will be removed from the ID elements.
207 | If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
208 | EOT
209 | }
210 |
211 | variable "id_length_limit" {
212 | type = number
213 | default = null
214 | description = <<-EOT
215 | Limit `id` to this many characters (minimum 6).
216 | Set to `0` for unlimited length.
217 | Set to `null` for keep the existing setting, which defaults to `0`.
218 | Does not affect `id_full`.
219 | EOT
220 | validation {
221 | condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
222 | error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
223 | }
224 | }
225 |
226 | variable "label_key_case" {
227 | type = string
228 | default = null
229 | description = <<-EOT
230 | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
231 | Does not affect keys of tags passed in via the `tags` input.
232 | Possible values: `lower`, `title`, `upper`.
233 | Default value: `title`.
234 | EOT
235 |
236 | validation {
237 | condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
238 | error_message = "Allowed values: `lower`, `title`, `upper`."
239 | }
240 | }
241 |
242 | variable "label_value_case" {
243 | type = string
244 | default = null
245 | description = <<-EOT
246 | Controls the letter case of ID elements (labels) as included in `id`,
247 | set as tag values, and output by this module individually.
248 | Does not affect values of tags passed in via the `tags` input.
249 | Possible values: `lower`, `title`, `upper` and `none` (no transformation).
250 | Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
251 | Default value: `lower`.
252 | EOT
253 |
254 | validation {
255 | condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
256 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
257 | }
258 | }
259 |
260 | variable "descriptor_formats" {
261 | type = any
262 | default = {}
263 | description = <<-EOT
264 | Describe additional descriptors to be output in the `descriptors` output map.
265 | Map of maps. Keys are names of descriptors. Values are maps of the form
266 | `{
267 | format = string
268 | labels = list(string)
269 | }`
270 | (Type is `any` so the map values can later be enhanced to provide additional options.)
271 | `format` is a Terraform format string to be passed to the `format()` function.
272 | `labels` is a list of labels, in order, to pass to `format()` function.
273 | Label values will be normalized before being passed to `format()` so they will be
274 | identical to how they appear in `id`.
275 | Default is `{}` (`descriptors` output will be empty).
276 | EOT
277 | }
278 |
279 | #### End of copy of cloudposse/terraform-null-label/variables.tf
280 |
--------------------------------------------------------------------------------
/docs/migration-0.45.x+.md:
--------------------------------------------------------------------------------
1 | # Migration to 0.45.x+
2 |
3 | Version `0.45.0` has been re-released as v2.0.0 and the migration documentation moved to [migration-v1-v2.md](migration-v1-v2.md)
4 |
--------------------------------------------------------------------------------
/docs/migration-v1-v2.md:
--------------------------------------------------------------------------------
1 | # Migration From Version 1 to Version 2
2 |
3 | Version 2 (a.k.a version 0.45.0) of this module introduces potential breaking changes that, without taking additional precautions, could cause the EKS cluster to be recreated.
4 |
5 | ## Background
6 |
 7 | This module creates an EKS cluster, which automatically creates an EKS-managed Security Group. All managed nodes are placed in that Security Group automatically by EKS, and unmanaged nodes may be placed
 8 | in it by the user, to ensure the nodes and control plane can communicate.
9 |
10 | Before version 2, this module, by default, created an additional Security Group. Prior to version `0.19.0` of this module, that additional Security Group was the only one exposed by
11 | this module (because EKS at the time did not create the managed Security Group for the cluster), and it was intended that all worker nodes (managed and unmanaged) be placed in this
12 | additional Security Group. With version `0.19.0`, this module exposed the managed Security Group created by the EKS cluster, in which all managed node groups are placed by default. We now
13 | recommend placing non-managed node groups in the EKS-created Security Group
14 | as well by using the `eks_cluster_managed_security_group_id` output to
15 | associate the node groups with it, and not create an additional Security Group.
16 |
17 | See https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html for more details.
18 |
19 | ## Migration process
20 |
21 | If you are deploying a new EKS cluster with this module, no special steps need to be taken. Just keep the variable `create_security_group` set to `false` to not create an additional Security
22 | Group. Don't use the deprecated variables (see `variables-deprecated.tf`).
23 |
24 | If you are updating this module to the latest version on existing (already deployed) EKS clusters, set the variable `create_security_group` to `true` to enable the additional Security Group
25 | and all the rules (which were enabled by default in the previous releases of this module).
26 |
27 | ## Deprecated variables
28 |
29 | Some variables have been deprecated (see `variables-deprecated.tf`), don't use them when creating new EKS clusters.
30 |
31 | - Use `allowed_security_group_ids` instead of `allowed_security_groups` and `workers_security_group_ids`
32 |
33 | - When using unmanaged worker nodes (e.g. with https://github.com/cloudposse/terraform-aws-eks-workers module), provide the worker nodes Security Groups to the cluster using
34 | the `allowed_security_group_ids` variable, for example:
35 |
36 | ```hcl
37 | module "eks_workers" {
38 | source = "cloudposse/eks-workers/aws"
39 | }
40 |
41 | module "eks_workers_2" {
42 | source = "cloudposse/eks-workers/aws"
43 | }
44 |
45 | module "eks_cluster" {
46 | source = "cloudposse/eks-cluster/aws"
47 | allowed_security_group_ids = [module.eks_workers.security_group_id, module.eks_workers_2.security_group_id]
48 | }
49 | ```
50 |
--------------------------------------------------------------------------------
/docs/migration-v3-v4.md:
--------------------------------------------------------------------------------
1 | # Migration From Version 2 or 3 to Version 4
2 |
3 | Users new to this module can skip this document and proceed to the main README.
4 | This document is for users who are updating from version 2 or 3 to version 4.
5 | Unfortunately, there is no "tl;dr" for this document, as the changes are
6 | substantial and require careful consideration.
7 |
8 | This guide consists of 4 parts:
9 |
10 | 1. [Summary and Background](#summary-and-background): A brief overview of the
11 | changes in version 4, what motivated them, and what they mean for you.
12 | 2. [Configuration Migration Overview](#configuration-migration-overview): A
13 | high-level overview of the changes you will need to make to your
14 | configuration to update to version 4. The inputs to this module have
15 | changed substantially, and you will need to update your configuration to
16 | match the new inputs.
17 | 3. [Configuration Migration Steps](#configuration-migration-steps): A
18 | detailed explanation of the changes you will need to make to your
19 | configuration to update to version 4.
20 | 4. [Cluster Migration Steps](#cluster-migration-steps): Detailed instructions
21 | for migrating your EKS cluster to be managed by version 4. After you have
22 | updated your configuration, you will still need to take some additional
23 | manual steps to have Terraform upgrade and manage your existing EKS
24 | cluster with the new version 4 configuration. This step can be skipped if
25 | you can tolerate simply creating a new EKS cluster and deleting the old one.
26 |
27 |
28 | ## Usage notes
29 |
30 | #### Critical usage notes:
31 |
32 | > [!CAUTION]
33 | > #### Close security loophole by migrating all the way to "API" mode
34 | >
35 | > In order for automatic conversions to take place, AWS requires that you
36 | > migrate in 2 steps: first to `API_AND_CONFIG_MAP` mode and then to `API` mode.
37 | > In the migration steps documented here, we abandon the `aws-auth` ConfigMap
38 | > in place, with its existing contents, and add the new access control
39 | > entries. In order to remove any access granted by the `aws-auth` ConfigMap,
40 | > you must complete the migration to "API" mode. Even then, the `aws-auth`
41 | > ConfigMap will still exist, but it will be ignored. You can then delete it manually.
42 |
43 | > [!WARNING]
44 | > #### WARNING: Do not manage Kubernetes resources in the same configuration
45 | >
46 | > Hopefully, and likely, the following does not apply to you, but just in case:
47 | >
48 | > It has always been considered a bad practice to manage resources
49 | > created by one resource (in this case, the EKS cluster) with another
50 | > resource (in this case resources provided by the `kubernetes` or
51 | > `helm` providers) in the same Terraform configuration, because of
52 | > issues with lifecycle management, timing, atomicity, etc. This
53 | > `eks-cluster` module used to do it anyway because it was the only
54 | > way to manage access control for the EKS cluster, but it did suffer
55 | > from those issues. Now that it is no longer necessary, the module no
56 | > longer does it, and it is a requirement that you remove the
57 | > "kubernetes" and "helm" providers from your root module or
58 | > component, if present, and therefore any `kubernetes_*` or `helm_*`
59 | > resources that were being managed by it. In most cases, this will be a
60 | > non-issue, because you should already be managing such resources
61 | > elsewhere, but if you had been integrating Kubernetes deployments into your
62 | > EKS cluster configuration and find changing that too challenging, then you
63 | > should delay the upgrade to version 4 of this module until you can address it.
64 |
65 | #### Recommendations:
66 |
67 | The following recommendations apply to both new and existing users of this module:
68 |
69 | - We recommend leaving `bootstrap_cluster_creator_admin_permissions` set to
70 | `false`. When set to `true`, EKS automatically adds an access entry for the
71 | EKS cluster creator during creation, but this interferes with Terraform's
72 | management of the access entries, and it is not recommended for Terraform
73 | users. Note that now that there is an API for managing access to the EKS
74 | cluster, it is no longer necessary to have admin access to the cluster in
75 | order to manage access to it. You only need to have the separate IAM
76 | permission [`eks:CreateAccessEntry`](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateAccessEntry.html)
77 | to add an access entry to the cluster and `eks:AssociateAccessPolicy` to give
78 | that entry ClusterAdmin permissions.
79 | - As of the release of version 4 of this module, it remains an issue that
80 | AWS Identity Center auto-generates IAM roles with non-deterministic ARNs
81 | to correspond to Permission Sets. Changes to the Permission Set will cause
82 | the ARN of the corresponding IAM role to change. This will invalidate any
83 | EKS Access Entry that used the old IAM role ARN, requiring you to remove
84 | the old access entry and add the new one. Follow [`containers-roadmap`
85 | issue 474](https://github.com/aws/containers-roadmap/issues/474) for
86 | updates on features that will mitigate this issue. Until then, we recommend
87 | you create a regular IAM role with a deterministic ARN and use that in your
88 | EKS Access Entries, and then giving Permission Sets the necessary permissions
89 | to assume that role.
90 | - For new clusters, we recommend setting `access_config.authentication_mode
91 | = "API"` to use the new access control API exclusively, so that is the
92 | default. However, AWS does not support a direct upgrade from the legacy
93 | `CONFIG_MAP` mode to the `API` mode, so when upgrading an existing EKS
94 | cluster, you must manually configure the `API_AND_CONFIG_MAP` mode for the initial upgrade.
95 |
96 | ## Summary and Background
97 |
98 | Version 4 of this module introduces several breaking changes that will
99 | require updates to your existing configuration. Major changes include:
100 |
101 | - Removal of any management of the `aws-auth` ConfigMap. This module now
102 | uses the AWS API to manage access to the EKS cluster, and no longer interacts
103 | with the ConfigMap directly in any way.
104 | - Removal of the Kubernetes Terraform provider. It was only used to interact with
105 | the `aws-auth` ConfigMap, and is no longer necessary.
106 | - Addition of Kubernetes access control via the AWS API, specifically
107 | Access Entries and Associated Access Policies.
108 | - Replacement of inputs associated with configuring the `aws-auth` ConfigMap
109 | with new inputs for configuring access control using the new AWS API. This
110 | was done in part to ensure that there is no ambiguity about which format
111 | of IAM Principal ARN is required, and what restrictions apply to the
112 | Kubernetes group memberships.
113 | - Restoration of the path component in any IAM Principal ARNs. When using
114 | the legacy `aws-auth` ConfigMap, the path component in any IAM Principal
115 | ARN had to be removed from the ARN, and the modified ARN was used in the
116 | ConfigMap. This was a workaround for a limitation in the AWS
117 | Implementation. With full AWS API support for access control, the path
118 | component is no longer removed, and the full ARN is required.
119 | - Removal of any support for creating an additional Security Group for
120 | worker nodes. This module now only supports adding some rules to the
121 | EKS-managed Security Group. Normally you would associate all worker nodes
122 | with that Security Group. (Worker nodes can be associated with additional
123 | Security Groups as well if desired.) This includes the removal of the
124 | `vpc_id` input, which was only needed for creating the additional Security
125 | Group.
126 | - Replacement of `aws_security_group_rule` resources with the newer
127 | `aws_vpc_security_group_ingress_rule` resources for adding ingress rules to
128 | the EKS-managed Security Group. For people who were adding ingress rules to
129 | the EKS-managed Security Group, this will cause a brief interruption in
130 | communication as the old rules are removed and the new rules are added. The
131 | benefit is that you can then use the new
132 | `aws_vpc_security_group_ingress_rule` and
133 | `aws_vpc_security_group_egress_rule` resources to manage the rules in your
134 | root module or a separate component, allowing you much more control and
135 | flexibility over the rules than this module provides.
136 |
137 | ### Access to the EKS cluster
138 |
139 | The primary credential used for accessing any AWS resource is your AWS IAM
140 | user or role, more generally referred to as an IAM principal. Previously,
141 | EKS clusters contained a Kubernetes ConfigMap called `aws-auth` that was used
142 | to map IAM principals to Kubernetes RBAC roles. This was the only way to
143 | grant access to the EKS cluster, and this module managed the `aws-auth` ConfigMap
144 | for you. However, managing a Kubernetes resource from Terraform was not ideal,
145 | and managing any resource created by another resource in the same Terraform
146 | configuration is not supported by Terraform. Prior to v4, this module relied
147 | on a series of tricks to get around these limitations, but it was far from
148 | a complete solution.
149 |
150 | In v4, this module now uses the [new AWS API](https://github.com/aws/containers-roadmap/issues/185#issuecomment-1863025784)
151 | to manage access to the EKS cluster and no longer interacts with the
152 | `aws-auth` ConfigMap directly.
153 |
154 | ### Security Groups
155 |
156 | This module creates an EKS cluster, which automatically creates an EKS-managed
157 | Security Group in which all managed nodes are placed automatically by EKS, and
158 | unmanaged nodes could be placed by the user, to ensure the nodes and control
159 | plane can communicate.
160 |
161 | In version 2, there was legacy support for creating an additional Security Group
161 | for worker nodes. (See the [version 2 migration documentation](migration-v1-v2.md)
162 | for more information about the legacy support.)
164 | This support has been removed in version 4, and this module now only supports
165 | some configuration of the EKS-managed Security Group, enabled by the
166 | `managed_security_group_rules_enabled` variable.
167 |
168 |
169 | ## Configuration Migration Overview
170 |
171 | If you are deploying a new EKS cluster with this module, no special steps
172 | need to be taken, although we recommend setting
173 | `access_config.authentication_mode = "API"` to use the new access control
174 | API exclusively (the default). The `API_AND_CONFIG_MAP` mode enables both the API and the
175 | `aws-auth` ConfigMap to allow for a smooth transition from the old method to the new one.
176 |
177 | ### Removed variables
178 |
179 | - Variables deprecated in version 2 have been removed in version 4. These
180 | include anything related to creating or managing a Security Group
181 | distinct from the one automatically created for the cluster by EKS.
182 |
183 | - Any variables relating to the Kubernetes Terraform provider or the
184 | `aws-auth` ConfigMap have been removed, and the provider itself has been
185 | removed.
186 |
187 | - Any variables configuring access to the EKS cluster, such
188 | as `map_additional_iam_roles` and `workers_role_arns`, have been removed and
189 | replaced with new variables with names starting with `access_` that configure
190 | access control using the new AWS API.
191 |
192 | ### Removed outputs
193 |
194 | - The `kubernetes_config_map_id` output has been removed, as the module no
195 | longer manages the `aws-auth` ConfigMap. If you had been using this output
196 | to "depend_on" before creating other resources, you probably no longer
197 | need to configure an explicit dependency.
198 |
199 | - Any outputs related to the additional Security Group have been removed.
200 |
201 | ## Configuration Migration Steps
202 |
203 | ### Access Control Configuration
204 |
205 | The primary change in version 4 is the new way of configuring access to the
206 | EKS cluster. This is done using the new AWS API for managing access to the
207 | EKS cluster, specifically Access Entries and Associated Access Policies.
208 | To support the transition of existing clusters, AWS now allows the cluster
209 | to be in one of 3 configuration modes: "CONFIG_MAP", "API", or
210 | "API_AND_CONFIG_MAP". This module defaults to "API", which is the recommended
211 | configuration for new clusters. However, existing clusters will be using the
212 | "CONFIG_MAP" configuration (previously the only option available), and AWS
213 | does not support direct upgrade from "CONFIG_MAP" to "API". Therefore:
214 |
215 |
216 | > [!NOTE]
217 | > #### You cannot directly upgrade from "CONFIG_MAP" to "API"
218 | >
219 | > When updating an existing cluster, you will need to set `authentication_mode`
220 | > to "API_AND_CONFIG_MAP" in your configuration, and then update the cluster.
221 | > After the cluster has been updated, you can set `authentication_mode` to
222 | > the default value of "API" and update the cluster again, but you cannot
223 | > directly upgrade from "CONFIG_MAP" to "API".
224 |
225 | ### Consideration: Information Known and Unknown at Plan Time
226 |
227 | Previously, all access control information could be unknown at plan time
228 | without causing any problems, because at plan time Terraform only cares about
229 | whether a resource (technically, a resource address) is created or not, and the
230 | single `aws-auth` ConfigMap was always created.
231 |
232 | Now, each piece of the access control configuration is a separate resource,
233 | which means it has to be created via either `count` or `for_each`. There are
234 | tradeoffs to both approaches, so you will have to decide which is best for
235 | your situation. See [Count vs For Each](https://docs.cloudposse.com/reference/terraform-in-depth/terraform-count-vs-for-each/)
236 | for a discussion of the issues that arise when creating resources from
237 | lists using `count`.
238 |
239 | To configure access using `for_each`, you can use the `access_entry_map` input.
240 | This is the preferred approach, as it keeps any entry from changing
241 | unnecessarily, but it requires that all IAM principal ARNs, Kubernetes group
242 | memberships, and EKS access policy ARNs are known at plan time, and that
243 | none of them are designated as "sensitive".
244 |
245 | If you cannot use `access_entry_map` for some entries, you can use it for
246 | the ones that are known at plan time and use the pair of inputs `access_entries`
247 | and `access_policy_associations` for the ones that are not. These inputs
248 | take lists, and resources are created via `count`. There is a separate
249 | list-based input for self-managed nodes, `access_entries_for_nodes`, because
250 | nodes are managed differently from other access entries.
251 |
252 | These list-based inputs only require you know the number of entries at plan
253 | time, not the specific entries themselves. However, this still means you cannot
254 | use functions that can modify the length of the list, such as `compact` or,
255 | [prior to Terraform v1.6.0, `sort`](https://github.com/hashicorp/terraform/issues/31035).
256 | See [Explicit Transformations of Lists](https://docs.cloudposse.com/reference/terraform-in-depth/terraform-unknown-at-plan-time/#explicit-transformations-of-lists)
257 | for more information on limitations on list transformations.
258 |
259 | ### Migrating Access for Standard Users
260 |
261 | Access for standard users is now configured using a combination of
262 | Kubernetes RBAC settings and the new [AWS EKS Access Policies](https://docs.aws.amazon.com/eks/latest/userguide/access-policies.html#access-policy-permissions).
263 | As explained above under [Consideration: Information Known and Unknown at Plan Time](#consideration-information-known-and-unknown-at-plan-time),
264 | there are both map-based and list-based inputs for configuring access.
265 |
266 | Whereas previously your only option was to assign IAM Principals to Kubernetes
267 | RBAC Groups, you can now also associate IAM Principals with EKS Access Policies.
268 |
269 | Unfortunately, migration from the old method to the new one is not as
270 | straightforward as we would like.
271 |
272 | > [!WARNING]
273 | > #### Restoration of the Path Component in IAM Principal ARNs
274 | >
275 | > Previously, when using the `aws-auth` ConfigMap, the path component in any
276 | > IAM Principal ARN had to be removed from the ARN, and the modified ARN was
277 | > used in the ConfigMap. Quoting from the [AWS EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html#aws-auth-users):
278 | > > The role ARN [used in `aws-auth`] can't include a path such as
279 | > >`role/my-team/developers/my-role`. The format of the ARN must be
280 | > >`arn:aws:iam::111122223333:role/my-role`. In this example,
281 | > > `my-team/developers/` needs to be removed.
282 | >
283 | > This was a workaround for a limitation in the AWS Implementation. With
284 | > full AWS API support for access control, the path component is no longer
285 | > removed, and the full ARN is required.
286 | >
287 | > If you had been using the `aws-auth` ConfigMap, you should have been
288 | > removing the path component either manually as part of your static
289 | > configuration, or programmatically. **You will need to undo these
290 | > transformations and provide the full ARN in the new configuration.**
291 |
292 | ### Migrating from Kubernetes RBAC Groups to EKS Access Policies
293 |
294 | #### EKS Access Policy ARNs, Names, and Abbreviations
295 |
296 | Previously, the only way to specify access to the EKS cluster was to assign
297 | IAM Principals to Kubernetes RBAC Groups. Now, you can also associate IAM
298 | Principals with EKS Access Policies. Full EKS Access Policy ARNs can be
299 | listed via the AWS CLI with the command `aws eks list-access-policies` and
300 | look like `arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy`. In
301 | AWS documentation and some other contexts, these policies are referred to by
302 | name, for example `AmazonEKSAdminPolicy`. The name is the last component of
303 | the ARN, and always matches the regex `^AmazonEKS(.*)Policy$`.
304 |
305 | In this module, wherever an EKS Access Policy ARN is required, you can use
306 | the full ARN, the full name (e.g. "AmazonEKSAdminPolicy"), or the
307 | abbreviated name (e.g. "Admin"). The abbreviated name is the `$1` part of the
308 | regex `^AmazonEKS(.*)Policy$`. This document will usually use the abbreviated
309 | name.
310 |
311 | #### Changes to Kubernetes RBAC Groups
312 |
313 | Previously, we created cluster administrators by assigning them to
314 | the `system:masters` group. With the new AWS API, we can no longer assign any
315 | users to any of the `system:*` groups. We have to create Cluster Administrators
316 | by associating the ClusterAdmin policy with them, with type `cluster`.
317 |
318 | > [!TIP]
319 | > ##### This module gives legacy support to the `system:masters` group
320 | >
321 | > As a special case, the `system:masters` Kubernetes group is still supported by
322 | > this module, but only when using `access_entry_map` and `type = "STANDARD"`. In
323 | > this case, the `system:masters` group is automatically replaced with an
324 | > association with the `ClusterAdmin` policy.
325 |
326 | > [!NOTE]
327 | > #### No special legacy support is provided for `access_entries`
328 | >
329 | > Note that this substitution is not done for `access_entries` because the
330 | > use case for `access_entries` is when values are not known at plan time,
331 | > and the substitution requires knowing the value at plan time.
332 |
333 | Any other `system:*` groups, such as `system:bootstrappers` or `system:nodes`
334 | must be removed. (Those specific groups are assigned automatically by AWS
335 | when using `type` other than `STANDARD`.)
336 |
337 | If you had been assigning users to any other Kubernetes RBAC groups, you can
338 | continue to do so, and we recommend it.
339 | At Cloud Posse, we have found that the pre-defined `view` and `edit` groups
340 | are unsatisfactory, because they do not allow access to Custom Resources,
341 | and we expect the same limitations will make the View and Edit EKS Access
342 | Policies unsatisfactory. We bypass these limitations by creating our own
343 | groups and roles, and by enhancing the `view` role using the label:
344 |
345 | ```
346 | rbac.authorization.k8s.io/aggregate-to-view: "true"
347 | ```
348 |
349 | It is not clear whether changes to the `view` role affect the View EKS Access
350 | Policy, but we expect that they do not, which is why we recommend continuing
351 | to use Kubernetes RBAC groups for roles other than ClusterAdmin and Admin.
352 |
353 | ### Migrating Access for Self-Managed Nodes
354 |
355 | There is almost nothing to configure to grant access to the EKS cluster for
356 | nodes, as AWS handles everything fully automatically for EKS-managed nodes
357 | and Fargate nodes.
358 |
359 | For self-managed nodes (which we no longer recommend using), you can use the
360 | `access_entries_for_nodes` input, which is a pair of lists, one for Linux worker
361 | nodes and one for Windows worker nodes. AWS manages all the access for these
362 | nodes, so you only need to provide the IAM roles that the nodes will assume;
363 | there is nothing else to configure.
364 |
365 | The `access_entries_for_nodes` input roughly corresponds to the removed
366 | `workers_role_arns` input, but requires separating Linux workers from
367 | Windows workers. There is no longer a need to configure Fargate nodes at all,
368 | as that is fully automatic in the same way that EKS managed nodes are.
369 |
370 | #### Example Access Entry Migration
371 |
372 | Here is an example of how you might migrate access configuration from version
373 | 3 to version 4. If you previously had a configuration like this:
374 |
375 | ```hcl
376 | map_additional_iam_roles = [
377 | {
378 | rolearn = replace(data.aws_iam_role.administrator_access.arn, "${data.aws_iam_role.administrator_access.path}/", "")
379 | username = "devops"
380 | groups = ["system:masters", "devops"]
381 | },
382 | {
383 | rolearn = data.aws_iam_role.gitlab_ci.arn
384 | username = "gitlab-ci"
385 | groups = ["system:masters", "ci"]
386 | },
387 | {
388 | rolearn = aws_iam_role.karpenter_node.arn
389 | username = "system:node:{{EC2PrivateDNSName}}"
390 | groups = ["system:bootstrappers", "system:nodes"]
391 | },
392 | {
393 | rolearn = aws_iam_role.fargate.arn
394 | username = "system:node:{{SessionName}}"
395 | groups = ["system:bootstrappers", "system:nodes", "system:node-proxier"]
396 | },
397 | ]
398 | ```
399 |
400 | You can migrate it as follows. Remember, you have the option of keeping
401 | `system:masters` as a Kubernetes group when using `access_entry_map`, but we
402 | do not recommend that, as it is only provided for backwards compatibility,
403 | and is otherwise a confusing wart that may eventually be removed.
404 |
405 | Also note that we have removed the username for `devops` as a [best practice
406 | when using roles](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html#creating-access-entries),
407 | and we recommend you only use usernames for users. We kept the username for
408 | `gitlab-ci` only so you would have an example.
409 |
410 | The new map-based configuration, using defaults, and showing how to set up
411 | ClusterAdmin with and without `system:masters`:
412 |
413 | ```hcl
414 | access_entry_map = {
415 | # Note that we no longer remove the path!
416 | (data.aws_iam_role.administrator_access.arn) = {
417 | kubernetes_groups = ["devops"]
418 | access_policy_associations = {
419 | ClusterAdmin = {}
420 | }
421 | }
422 | (data.aws_iam_role.gitlab_ci.arn) = {
423 |     kubernetes_groups = ["system:masters", "ci"]
424 | user_name = "gitlab-ci"
425 | }
426 | }
427 | # Use access_entries_for_nodes for self-managed node groups
428 | access_entries_for_nodes = {
429 | EC2_LINUX = [aws_iam_role.karpenter_node.arn]
430 | }
431 | # No need to configure Fargate nodes
432 | ```
433 |
434 | ## Cluster Migration Steps
435 |
436 | ### Pt. 1: Prepare Your Configuration
437 |
438 | > [!NOTE]
439 | > #### Usage note: Using `terraform` or `atmos`
440 | >
441 | > If you are using `atmos`, you have a choice of running every command under
442 | > `atmos`, or running the `atmos terraform shell` command to set up your
443 | > environment to run Terraform commands. Normally, we recommend `atmos`
444 | > users run all the commands under `atmos`, but this document needs to
445 | > support users not using `atmos` and instead using `terraform` only, and so
446 | > may err on the side of providing only `terraform` commands at some points.
447 | >
448 | > Terraform users are expected to add any necessary steps or arguments (such
449 | > as selecting a workspace or adding a `-var-file` argument) to the commands
450 | > given. Atmos users need to substitute the component and stack names in
451 | > each command. If you are using `atmos`, you can use the following command to
452 | > set up your environment to run Terraform commands:
453 | >
454 | > ```shell
455 | > atmos terraform shell <component> -s <stack>
456 | > ```
457 | >
458 | > After running this command, you will be in a subshell with the necessary
459 | > environment variables set to run Terraform without extra arguments. You can
460 | > exit the subshell by typing `exit`.
461 | >
462 | > One caveat is that if you want to run `terraform apply` in an
463 | > `atmos` sub-shell, you will need to temporarily unset the `TF_CLI_ARGS_apply`
464 | > environment variable, which sets a `-var-file` argument that is not allowed when applying a plan:
465 | >
466 | > ```
467 | > # inside the atmos subshell
468 | > $ terraform apply
469 | > │ Error: Can't set variables when applying a saved plan
470 | > │
471 | > │ The -var and -var-file options cannot be used when applying a saved plan
472 | > file, because a saved plan includes the variable values that were set when it was created.
473 | >
474 | > $ TF_CLI_ARGS_apply= terraform apply
475 | > # command runs as normal
476 | > ```
477 | >
478 |
479 |
480 | #### Ensure your cluster satisfies the prerequisites
481 |
482 | Verify that your cluster satisfies [the AWS prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html#access-entries-prerequisites) for using the new access control API.
483 |
484 | Verify that you are not using the `kubernetes` or `helm` provider in your root
485 | module, or managing any Kubernetes resources (including Helm charts). Run:
486 |
487 | ```shell
488 | terraform state list | grep -E 'helm_|kubernetes_'
489 | ```
490 |
491 |
492 | There should only be one resource output from this command, either `aws_auth[0]`
493 | or `aws_auth_ignore_changes[0]`, which is created by earlier versions
494 | of this module. If there are more resources listed, you need to investigate
495 | further to find and remove the source of the resource. Any other
496 | `kubernetes_*` resources (and any `helm_*` resources) are coming from other
497 | places and need to be moved or removed before upgrading. You should not
498 | attempt an upgrade to version 4 until you have moved or removed management
499 | of these resources. See the "Caution" under [Usage notes](#usage-notes)
500 | above for details.
501 |
502 | #### Migrate your access control configuration
503 |
504 | There is not exactly a rote transformation from the old access control
505 | configuration to the new one, and there are some new wrinkles to consider.
506 | Follow the guidance provided above under [Configuration Migration Steps](#configuration-migration-steps).
507 |
508 | #### Migrate management of additional security group (if applicable)
509 |
510 | For historical reasons, this module previously supported creating an
511 | additional Security Group, with the idea that it would be used for worker
512 | nodes. You can find some more information about this in the [Migration From v1 to v2](migration-v1-v2.md#background) document.
513 |
514 | If you had **not** set `create_security_group = true` in version 2 (you
515 | either set it to `false` or left it at its default value), you
516 | can skip this step.
517 |
518 | If you **had** set `create_security_group = true` and you do nothing about it
519 | before updating to version 4, Terraform will try to remove the Security
520 | Group and most likely fail with a timeout error because the Security Group
521 | is still associated with some resources.
522 |
523 | You have several options for how to proceed:
524 |
525 | 1. Manually delete the Security Group and remove any usage of it. It may be
526 | that it was not being used, or it was being used in a redundant fashion
527 | and thus was not needed. It may also be that it was being used to provide
528 | this module with access to the EKS control plane, so that it could manage
529 | the `aws-auth` ConfigMap. Since that access is no longer needed, you
530 | might be able to safely delete the Security Group without any replacement.
531 |
532 | 1. Manually delete the Security Group and migrate any usage and
533 | configuration of it to the EKS-managed Security Group. This is discussed
534 | in the next section.
535 |
536 | 1. Manually delete the Security Group and create a new one in your root
537 | module or a separate component, using our [security-group module](https://github.com/cloudposse/terraform-aws-security-group).
538 |
539 |
540 | Because this is a complex operation with several options and potential
541 | impacts, and because this feature had been deprecated for a long time, we are
542 | not providing further instructions here. If you need assistance with this,
543 | please contact [Cloud Posse Professional Services](https://cloudposse.com/professional-services/)
544 | for options and pricing.
545 |
546 | #### Migrate management of EKS-managed security group (if applicable)
547 |
548 | EKS creates a Security Group for the cluster, and all managed nodes are
549 | automatically associated with that Security Group. The primary purpose of that
550 | security group is to enable communication between the nodes and the Kubernetes
551 | control plane.
552 |
553 | When you create a node group for the cluster, even an EKS managed node group,
554 | you can associate the nodes with additional Security Groups as well.
555 | As a best practice, you would modify a node group Security Group to allow
556 | communication between the nodes and other resources, such as a database, or
557 | even the public internet via a NAT Gateway, while leaving the EKS managed
558 | Security Group alone, to protect the control plane. You would manage the
559 | rules for the node group's Security Group along with managing the node group.
560 |
561 | However, people often instead modify the EKS-managed Security Group to allow
562 | the necessary communication rather than create a separate Security group.
563 | This was previously necessary in order to allow the v2 version of this
564 | module to be able to manage the `aws-auth` ConfigMap via the Kubernetes
565 | control plane.
566 |
567 | Depending on your use cases and security posture, you may want to migrate
568 | existing access rules to a new security group, or you may want to modify the
569 | rules in the EKS-managed Security Group to allow the necessary communication.
570 |
571 | This module retains some of the v2 features that allow you to add ingress
572 | rules to the EKS-managed Security Group, but it no longer allows you to
573 | create and manage a separate Security Group for worker nodes, as explained
574 | above.
575 |
576 | To make changes to the EKS-managed Security Group, we recommend that
577 | you either directly use the `aws_vpc_security_group_ingress_rule` and
578 | `aws_vpc_security_group_egress_rule` resources in your root module, or use a
579 | specialized module such as Cloud Posse's
580 | [security-group module](https://github.com/cloudposse/terraform-aws-security-group)
581 | (once v3 is released) to manage the rules. This will give you much more control and
582 | flexibility over the rules than this module provides.
583 |
584 | For backward compatibility, this module still supports adding ingress
585 | rules to the EKS-managed Security Group, which may be sufficient for the
586 | simple case of allowing ingress from anything in your VPC. To use this
587 | feature:
588 |
589 | 1. Set `managed_security_group_rules_enabled = true` in your configuration.
590 | Without this, any other settings affecting the security group will be
591 | ignored.
592 | 2. Allow all ingress from designated security groups by adding their IDs to
593 | `allowed_security_group_ids`.
594 | 3. Allow all ingress from designated CIDR blocks by adding them to
595 | `allowed_cidr_blocks`.
596 | 4. You can add more fine-grained ingress rules via the
597 | `custom_ingress_rules` input, but this input requires that the source
598 | security group ID be known at plan time and that there is no more than
599 | one single rule per source security group.
600 |
601 |
602 | ### Pt. 2: No Going Back
603 |
604 | > [!WARNING]
605 | > #### Once you set `API_AND_CONFIG_MAP` mode, there is no going back
606 | >
607 | > Once you proceed with the following steps, there is no going back.
608 | > AWS will not allow you to disable the new access control API once it is
609 | > enabled, and restoring this module's access to the `aws-auth` ConfigMap
610 | > will be difficult if not impossible, and we do not support it.
611 |
612 | #### Verify `kubectl` access
613 |
614 | Configure `kubectl` to access the cluster via EKS authentication:
615 |
616 | - Assume an IAM role (or set your `AWS_PROFILE` environment variable) so that
617 | you are using credentials that should have Cluster Admin access to the
618 | cluster
619 | - Set your `AWS_DEFAULT_REGION` to the region where the cluster is located
620 | - Run `aws eks update-kubeconfig --name <cluster name>` to configure `kubectl`
621 | to reference the cluster
622 |
623 | Test your access with `kubectl` and optionally `rakkess`.
624 | ([rakkess](https://github.com/corneliusweig/rakkess) is a tool that shows
625 | what kind of access you have to resources in a Kubernetes cluster. It is
626 | pre-installed in most versions of Geodesic and can be installed on Linux
627 | systems via Cloud Posse's Debian or RPM package repositories.)
628 |
629 |
630 | ```shell
631 | # check if you have any access at all. Should output "yes".
632 | kubectl auth can-i -A create selfsubjectaccessreviews.authorization.k8s.io
633 |
634 | # Do you have basic read access?
635 | kubectl get nodes
636 |
637 | # Do you have full cluster administrator access?
638 | kubectl auth can-i '*' '*'
639 |
640 | # Show me what I can and cannot do (if `rakkess` is installed)
641 | rakkess
642 |
643 | ```
644 |
645 | #### Update your module reference to v4
646 |
647 | Update your module reference to version 4.0.0 or later in your root module or
648 | component. Ensure that you have updated all the inputs to the module to match
649 | the new inputs.
650 |
651 | Run `terraform plan` or `atmos terraform plan -s <stack>`
652 | and fix any errors you get, such as "Unsupported argument", until the only
653 | error you are left with is something like:
654 |
655 | ```plaintext
656 | Error: Provider configuration not present
657 | │
658 | │ To work with module.eks_cluster.kubernetes_config_map.aws_auth[0] (orphan)
659 | | its original provider configuration at ... is required, but it has been removed.
660 | ```
661 |
662 | or
663 |
664 | ```plaintext
665 | │ Error: Get "http://localhost/api/v1/namespaces/kube-system/configmaps/aws-auth": dial tcp [::1]:80: connect: connection refused
666 | ```
667 |
668 | #### Remove the `auth-map` from the Terraform state
669 |
670 | If you got the error message about the `auth-map` being an orphan, then
671 | take the "resource address" of the `auth-map` from the error message (the
672 | part before "`(orphan)`") and remove it from the terraform state. Using the
673 | address from the error message above, you would run:
674 |
675 | ```shell
676 | terraform state rm 'module.eks_cluster.kubernetes_config_map.aws_auth[0]'
677 | # or
678 | atmos terraform state rm 'module.eks_cluster.kubernetes_config_map.aws_auth[0]' -s <stack>
679 | ```
680 |
681 | It is important to include the single quotes around the address, because
682 | otherwise `[0]` would be interpreted as a shell glob.
683 |
684 | If you got the "connection refused" error message, then you need to find the
685 | resource(s) to remove from the state. You can do this by running:
686 |
687 | ```shell
688 | terraform state list | grep kubernetes_
689 | ```
690 |
691 | There should only be one resource output from this command. If there are
692 | more, then review the "Caution" under [Usage notes](#usage-notes) and the
693 | [Prerequisites](#ensure-your-cluster-satisfies-the-prerequisites) above.
694 |
695 | Use the address output from the above command to remove the resource from
696 | the Terraform state, as shown above.
697 |
698 | Run `terraform plan` again, at which point you should see no errors.
699 |
700 | #### Review the changes
701 |
702 | You should review the changes that Terraform is planning to make to your
703 | cluster. Calmly. Expect some changes.
704 |
705 | - `...null_resource.wait_for_cluster[0]` will be **destroyed**. This is
706 | expected, because it was part of the old method of managing the `aws-auth` ConfigMap.
707 | - Various `aws_security_group_rule` resources will be **destroyed**. They
708 | should be replaced with corresponding
709 | `aws_vpc_security_group_ingress_rule` resources. Note that if you had
710 | specified multiple ingress CIDRs in `allowed_cidr_blocks`, they used to be
711 | managed by a single `aws_security_group_rule` resource, but now each CIDR
712 | is managed by a separate `aws_vpc_security_group_ingress_rule` resource,
713 | so you may see more rule resources being created than destroyed.
714 | - `...aws_eks_cluster.default[0]` will be **updated**. This is expected,
715 | because the `authentication_mode` is changing from "CONFIG_MAP" to
716 | "API_AND_CONFIG_MAP". This is the main point of this upgrade.
717 | - Expect to see resources of `aws_eks_access_entry` and
718 | `aws_eks_access_policy_association` being **created**. These are the new
719 | resources that manage access to the EKS cluster, replacing the entries in
720 | the old `aws-auth` ConfigMap.
721 | - You will likely see changes to `...aws_iam_openid_connect_provider.default[0]`.
722 | This is because it depends on the `aws_eks_cluster` resource, specifically
723 | its TLS certificate, and the `aws_eks_cluster` resource is being updated,
724 | so Terraform cannot be sure that the OIDC provider will not need to be
725 | updated as well. This is expected and harmless.
726 | - You will likely see changes to IRSA (service account role) resources. This
727 | is because they depend on the OIDC provider, and the OIDC provider may
728 | need to be updated. This is expected and harmless.
729 |
730 | #### Apply the changes
731 |
732 | Apply the changes with `terraform apply` or
733 |
734 | ```shell
735 | atmos terraform apply -s <stack> --from-planfile
736 | ```
737 |
738 | (The `--from-planfile` tells `atmos` to use the planfile it just generated
739 | rather than to create a new one. This is safer, because it ensures that the
740 | planfile you reviewed is the one that is applied. However, in future steps,
741 | we will run `apply` directly, without first running `plan` and without using
742 | `--from-planfile`, to save time and effort. This is safe because we still
743 | have a chance to review and approve or cancel the changes before they are
744 | applied.)
745 |
746 | ##### Error: creating EKS Access Entry
747 |
748 | You may get an error message like this (truncated):
749 |
750 | ```plaintext
751 | │ Error: creating EKS Access Entry
752 | (eg-test-eks-cluster:arn:aws:iam::123456789012:role/eg-test-terraform):
753 | operation error EKS: CreateAccessEntry,
754 | https response error StatusCode: 409, RequestID: ..., ResourceInUseException:
755 | The specified access entry resource is already in use on this cluster.
756 | │
757 | │ with module.eks_cluster.aws_eks_access_entry.map["arn:aws:iam::123456789012:role/eg-test-terraform"],
758 | │ on .terraform/e98s/modules/eks_cluster/auth.tf line 60, in resource "aws_eks_access_entry" "map":
759 | ```
760 |
761 | This is because, during the conversion from "CONFIG_MAP" to
762 | "API_AND_CONFIG_MAP", EKS automatically adds an access entry for the EKS
763 | cluster creator.
764 |
765 | If you have been following Cloud Posse's recommendations, you will have
766 | configured ClusterAdmin access for the IAM principal that you used to create
767 | the EKS cluster. This configuration duplicates the automatically created access
768 | entry, resulting in the above error.
769 |
770 | We have not found a way to avoid this situation, so our best recommendation is,
771 | if you encounter it, import the automatically created access entry into your
772 | Terraform state. The `access entry ID` to import is given in the error
773 | message in parentheses. In the example above, the ID is
774 | `eg-test-eks-cluster:arn:aws:iam::123456789012:role/eg-test-terraform`.
775 |
776 | The Terraform `resource address` for the resource will also be in the error
777 | message: it is the part after "with". In the example above, the address is
778 |
779 | ```plaintext
780 | module.eks_cluster.aws_eks_access_entry.map["arn:aws:iam::123456789012:role/eg-test-terraform"]
781 | ```
782 |
783 | If you do not see it in the error message, you can find it by running
784 | `terraform plan` (or the corresponding `atmos` command) and look for the
785 | corresponding access entry resource that Terraform will want to create. It
786 | will be something like
787 |
788 | ```plaintext
789 | ...aws_eks_access_entry.map["arn:aws:iam::123456789012:role/eg-test-terraform"]
790 | ```
791 |
792 | although it may be `standard` instead of `map`.
793 |
794 | To import the resource using `atmos`, use the same component and stack name
795 | as you were using to deploy the cluster, and run a command like
796 |
797 | ```shell
798 | atmos terraform import \
799 |   '<resource address>' '<access entry ID>' \
800 |   -s=<stack>
801 | ```
802 |
803 | To import the resource using Terraform, again, you need to supply the same
804 | configuration that you used to deploy the cluster, and run a command like
805 |
806 | ```shell
807 | terraform import -var-file <var file> '<resource address>' '<access entry ID>'
808 | ```
809 |
810 | > [!IMPORTANT]
811 | > #### Use single quotes around the resource address and access entry ID
812 | >
813 | > It is critical to use single quotes around the resource address and access
814 | > entry ID to prevent the shell from interpreting the square brackets and colons
815 | > and to preserve the double quotes in the resource address.
816 |
817 | After successfully importing the resource, run `terraform apply`
818 | (generating a new planfile) to add tags to the entry and make any other
819 | changes that were not made because of the above error.
820 |
821 | #### Verify access to the cluster
822 |
823 | Verify that you still have the access to the cluster that you expect, as you
824 | did before the upgrade.
825 |
826 |
827 | ```shell
828 | kubectl auth can-i '*' '*'
829 | ```
830 |
831 | which will return `yes` if you have full access to the cluster.
832 |
833 | For a more detailed report, you can use [rakkess](https://github.com/corneliusweig/rakkess),
834 | which is available via many avenues, including Cloud Posse's package repository,
835 | and is installed by default on some versions of Geodesic.
836 |
837 | #### Clean up
838 |
839 | At this point you have both the old and new access control methods enabled,
840 | but nothing is managing the `aws-auth` ConfigMap. The `aws-auth` ConfigMap
841 | has been abandoned by this module and will no longer have entries added or,
842 | crucially, removed. In order to remove this lingering unmanaged grant of
843 | access, you should now proceed to migrate the cluster to be managed solely
844 | by the new access control API, and manually remove the `aws-auth` ConfigMap.
845 |
846 | - Update the `authentication_mode` to "API" in your configuration, and run
847 | `terraform apply` again. This will cause EKS to ignore the `aws-auth`
848 | ConfigMap, but will not remove it.
849 | - Manually remove the `aws-auth` ConfigMap. You can do this with `kubectl
850 | delete configmap aws-auth --namespace kube-system`. This will not affect
851 | the cluster, because it is now being managed by the new access control API,
852 | but it will reduce the possibility of confusion in the future.
853 |
--------------------------------------------------------------------------------
/examples/complete/context.tf:
--------------------------------------------------------------------------------
1 | #
2 | # ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label
3 | # All other instances of this file should be a copy of that one
4 | #
5 | #
6 | # Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf
7 | # and then place it in your Terraform module to automatically get
8 | # Cloud Posse's standard configuration inputs suitable for passing
9 | # to Cloud Posse modules.
10 | #
11 | # curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf
12 | #
13 | # Modules should access the whole context as `module.this.context`
14 | # to get the input variables with nulls for defaults,
15 | # for example `context = module.this.context`,
16 | # and access individual variables as `module.this.`,
17 | # with final values filled in.
18 | #
19 | # For example, when using defaults, `module.this.context.delimiter`
20 | # will be null, and `module.this.delimiter` will be `-` (hyphen).
21 | #
22 |
23 | module "this" {
24 | source = "cloudposse/label/null"
25 | version = "0.25.0" # requires Terraform >= 0.13.0
26 |
27 | enabled = var.enabled
28 | namespace = var.namespace
29 | tenant = var.tenant
30 | environment = var.environment
31 | stage = var.stage
32 | name = var.name
33 | delimiter = var.delimiter
34 | attributes = var.attributes
35 | tags = var.tags
36 | additional_tag_map = var.additional_tag_map
37 | label_order = var.label_order
38 | regex_replace_chars = var.regex_replace_chars
39 | id_length_limit = var.id_length_limit
40 | label_key_case = var.label_key_case
41 | label_value_case = var.label_value_case
42 | descriptor_formats = var.descriptor_formats
43 | labels_as_tags = var.labels_as_tags
44 |
45 | context = var.context
46 | }
47 |
48 | # Copy contents of cloudposse/terraform-null-label/variables.tf here
49 |
50 | variable "context" {
51 | type = any
52 | default = {
53 | enabled = true
54 | namespace = null
55 | tenant = null
56 | environment = null
57 | stage = null
58 | name = null
59 | delimiter = null
60 | attributes = []
61 | tags = {}
62 | additional_tag_map = {}
63 | regex_replace_chars = null
64 | label_order = []
65 | id_length_limit = null
66 | label_key_case = null
67 | label_value_case = null
68 | descriptor_formats = {}
69 | # Note: we have to use [] instead of null for unset lists due to
70 | # https://github.com/hashicorp/terraform/issues/28137
71 | # which was not fixed until Terraform 1.0.0,
72 | # but we want the default to be all the labels in `label_order`
73 | # and we want users to be able to prevent all tag generation
74 | # by setting `labels_as_tags` to `[]`, so we need
75 | # a different sentinel to indicate "default"
76 | labels_as_tags = ["unset"]
77 | }
78 | description = <<-EOT
79 | Single object for setting entire context at once.
80 | See description of individual variables for details.
81 | Leave string and numeric variables as `null` to use default value.
82 | Individual variable settings (non-null) override settings in context object,
83 | except for attributes, tags, and additional_tag_map, which are merged.
84 | EOT
85 |
86 | validation {
87 | condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
88 | error_message = "Allowed values: `lower`, `title`, `upper`."
89 | }
90 |
91 | validation {
92 | condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
93 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
94 | }
95 | }
96 |
97 | variable "enabled" {
98 | type = bool
99 | default = null
100 | description = "Set to false to prevent the module from creating any resources"
101 | }
102 |
103 | variable "namespace" {
104 | type = string
105 | default = null
106 | description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique"
107 | }
108 |
109 | variable "tenant" {
110 | type = string
111 | default = null
112 | description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for"
113 | }
114 |
115 | variable "environment" {
116 | type = string
117 | default = null
118 | description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'"
119 | }
120 |
121 | variable "stage" {
122 | type = string
123 | default = null
124 | description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'"
125 | }
126 |
127 | variable "name" {
128 | type = string
129 | default = null
130 | description = <<-EOT
131 | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
132 | This is the only ID element not also included as a `tag`.
133 | The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
134 | EOT
135 | }
136 |
137 | variable "delimiter" {
138 | type = string
139 | default = null
140 | description = <<-EOT
141 | Delimiter to be used between ID elements.
142 | Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
143 | EOT
144 | }
145 |
146 | variable "attributes" {
147 | type = list(string)
148 | default = []
149 | description = <<-EOT
150 | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
151 | in the order they appear in the list. New attributes are appended to the
152 | end of the list. The elements of the list are joined by the `delimiter`
153 | and treated as a single ID element.
154 | EOT
155 | }
156 |
157 | variable "labels_as_tags" {
158 | type = set(string)
159 | default = ["default"]
160 | description = <<-EOT
161 | Set of labels (ID elements) to include as tags in the `tags` output.
162 | Default is to include all labels.
163 | Tags with empty values will not be included in the `tags` output.
164 | Set to `[]` to suppress all generated tags.
165 | **Notes:**
166 | The value of the `name` tag, if included, will be the `id`, not the `name`.
167 | Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
168 | changed in later chained modules. Attempts to change it will be silently ignored.
169 | EOT
170 | }
171 |
172 | variable "tags" {
173 | type = map(string)
174 | default = {}
175 | description = <<-EOT
176 | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
177 | Neither the tag keys nor the tag values will be modified by this module.
178 | EOT
179 | }
180 |
181 | variable "additional_tag_map" {
182 | type = map(string)
183 | default = {}
184 | description = <<-EOT
185 | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
186 | This is for some rare cases where resources want additional configuration of tags
187 | and therefore take a list of maps with tag key, value, and additional configuration.
188 | EOT
189 | }
190 |
191 | variable "label_order" {
192 | type = list(string)
193 | default = null
194 | description = <<-EOT
195 | The order in which the labels (ID elements) appear in the `id`.
196 | Defaults to ["namespace", "environment", "stage", "name", "attributes"].
197 | You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present.
198 | EOT
199 | }
200 |
201 | variable "regex_replace_chars" {
202 | type = string
203 | default = null
204 | description = <<-EOT
205 | Terraform regular expression (regex) string.
206 | Characters matching the regex will be removed from the ID elements.
207 | If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
208 | EOT
209 | }
210 |
211 | variable "id_length_limit" {
212 | type = number
213 | default = null
214 | description = <<-EOT
215 | Limit `id` to this many characters (minimum 6).
216 | Set to `0` for unlimited length.
217 | Set to `null` for keep the existing setting, which defaults to `0`.
218 | Does not affect `id_full`.
219 | EOT
220 | validation {
221 | condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
222 | error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
223 | }
224 | }
225 |
226 | variable "label_key_case" {
227 | type = string
228 | default = null
229 | description = <<-EOT
230 | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
231 | Does not affect keys of tags passed in via the `tags` input.
232 | Possible values: `lower`, `title`, `upper`.
233 | Default value: `title`.
234 | EOT
235 |
236 | validation {
237 | condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
238 | error_message = "Allowed values: `lower`, `title`, `upper`."
239 | }
240 | }
241 |
242 | variable "label_value_case" {
243 | type = string
244 | default = null
245 | description = <<-EOT
246 | Controls the letter case of ID elements (labels) as included in `id`,
247 | set as tag values, and output by this module individually.
248 | Does not affect values of tags passed in via the `tags` input.
249 | Possible values: `lower`, `title`, `upper` and `none` (no transformation).
250 | Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
251 | Default value: `lower`.
252 | EOT
253 |
254 | validation {
255 | condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
256 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
257 | }
258 | }
259 |
260 | variable "descriptor_formats" {
261 | type = any
262 | default = {}
263 | description = <<-EOT
264 | Describe additional descriptors to be output in the `descriptors` output map.
265 | Map of maps. Keys are names of descriptors. Values are maps of the form
266 | `{
267 | format = string
268 | labels = list(string)
269 | }`
270 | (Type is `any` so the map values can later be enhanced to provide additional options.)
271 | `format` is a Terraform format string to be passed to the `format()` function.
272 | `labels` is a list of labels, in order, to pass to `format()` function.
273 | Label values will be normalized before being passed to `format()` so they will be
274 | identical to how they appear in `id`.
275 | Default is `{}` (`descriptors` output will be empty).
276 | EOT
277 | }
278 |
279 | #### End of copy of cloudposse/terraform-null-label/variables.tf
280 |
--------------------------------------------------------------------------------
/examples/complete/fixtures.us-east-2.tfvars:
--------------------------------------------------------------------------------
1 | region = "us-east-2"
2 | 
3 | availability_zones = ["us-east-2a", "us-east-2b"]
4 | 
5 | namespace = "eg"
6 | 
7 | stage = "test"
8 | 
9 | name = "eks"
10 | 
11 | # oidc_provider_enabled is required to be true for VPC CNI addon
12 | oidc_provider_enabled = true
13 | 
14 | enabled_cluster_log_types = ["audit"]
15 | 
16 | cluster_log_retention_period = 7
17 | 
18 | instance_types = ["t3.small"]
19 | 
20 | desired_size = 2
21 | 
22 | max_size = 3
23 | 
24 | min_size = 2
25 | 
26 | kubernetes_labels = {}
27 | 
28 | cluster_encryption_config_enabled = true
29 | 
30 | # When updating the Kubernetes version, also update the API and client-go version in test/src/go.mod
31 | kubernetes_version = "1.29"
32 | 
33 | private_ipv6_enabled = false
34 | 
35 | addons = [
36 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html
37 | {
38 | addon_name = "kube-proxy"
39 | addon_version = null
40 | resolve_conflicts_on_create = "OVERWRITE"
41 | resolve_conflicts_on_update = "PRESERVE"
42 | service_account_role_arn = null
43 | },
44 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html
# NOTE(review): this entry uses the legacy "resolve_conflicts" attribute while the
# kube-proxy entry above uses the newer resolve_conflicts_on_create/on_update pair —
# presumably intentional, to exercise both addon input shapes; confirm.
45 | {
46 | addon_name = "coredns"
47 | addon_version = null
48 | resolve_conflicts = "NONE"
49 | service_account_role_arn = null
50 | },
51 | ]
52 | 
53 | upgrade_policy = {
54 | support_type = "STANDARD"
55 | }
56 | 
57 | zonal_shift_config = {
58 | enabled = true
59 | }
--------------------------------------------------------------------------------
/examples/complete/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 | 
# Adds the "cluster" attribute to the base context; module.label.id is used below
# (in locals) to build the kubernetes.io/cluster/<id> discovery tag.
5 | module "label" {
6 | source = "cloudposse/label/null"
7 | version = "0.25.0"
8 | 
9 | attributes = ["cluster"]
10 | 
11 | context = module.this.context
12 | }
13 | 
# Identify the current caller. issuer_arn from the session context feeds
# local.access_entry_map so the identity running Terraform gets ClusterAdmin access.
# NOTE(review): issuer_arn is presumably the underlying role ARN when the caller is
# an assumed-role session — confirm against the AWS provider docs.
14 | data "aws_caller_identity" "current" {}
15 | 
16 | data "aws_iam_session_context" "current" {
17 | arn = data.aws_caller_identity.current.arn
18 | }
19 |
20 | locals {
21 | enabled = module.this.enabled
22 | 
23 | private_ipv6_enabled = var.private_ipv6_enabled
24 | 
25 | # The usage of the specific kubernetes.io/cluster/* resource tags below are required
26 | # for EKS and Kubernetes to discover and manage networking resources
27 | # https://aws.amazon.com/premiumsupport/knowledge-center/eks-vpc-subnet-discovery/
28 | # https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/deploy/subnet_discovery.md
29 | tags = { "kubernetes.io/cluster/${module.label.id}" = "shared" }
30 | 
31 | # required tags to make ALB ingress work https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
32 | public_subnets_additional_tags = {
33 | "kubernetes.io/role/elb" : 1
34 | }
35 | private_subnets_additional_tags = {
36 | "kubernetes.io/role/internal-elb" : 1
37 | }
38 | 
39 | # Enable the IAM user creating the cluster to administer it,
40 | # without using the bootstrap_cluster_creator_admin_permissions option,
41 | # as a way to test the access_entry_map feature.
42 | # In general, this is not recommended. Instead, you should
43 | # create the access_entry_map statically, with the ARNs you want to
44 | # have access to the cluster. We do it dynamically here just for testing purposes.
45 | access_entry_map = {
46 | (data.aws_iam_session_context.current.issuer_arn) = {
47 | access_policy_associations = {
48 | ClusterAdmin = {}
49 | }
50 | }
51 | }
52 | 
53 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html#vpc-cni-latest-available-version
54 | vpc_cni_addon = {
55 | addon_name = "vpc-cni"
56 | addon_version = null
57 | resolve_conflicts = "OVERWRITE"
58 | service_account_role_arn = one(module.vpc_cni_eks_iam_role[*].service_account_role_arn)
59 | }
60 | 
# Always install the vpc-cni addon, then append whatever addons the caller supplies.
# NOTE(review): module.vpc_cni_eks_iam_role is presumably declared in vpc-cni.tf — verify.
61 | addons = concat([
62 | local.vpc_cni_addon
63 | ], var.addons)
64 | }
65 |
# Dedicated VPC for the example. local.tags carries the kubernetes.io/cluster/<id>
# tag required for EKS subnet discovery (see locals above).
66 | module "vpc" {
67 | source = "cloudposse/vpc/aws"
68 | version = "2.2.0"
69 | 
70 | ipv4_primary_cidr_block = "172.16.0.0/16"
71 | tags = local.tags
72 | 
73 | context = module.this.context
74 | }
75 | 
# Dual-stack (IPv4 + IPv6) public and private subnets across the requested AZs.
# max_nats = 1 limits the deployment to a single NAT gateway — presumably to keep
# test-run cost and time down rather than for HA.
76 | module "subnets" {
77 | source = "cloudposse/dynamic-subnets/aws"
78 | version = "2.4.2"
79 | 
80 | availability_zones = var.availability_zones
81 | vpc_id = module.vpc.vpc_id
82 | igw_id = [module.vpc.igw_id]
83 | ipv4_cidr_block = [module.vpc.vpc_cidr_block]
84 | ipv6_cidr_block = [module.vpc.vpc_ipv6_cidr_block]
85 | ipv6_enabled = true
86 | max_nats = 1
87 | nat_gateway_enabled = true
88 | nat_instance_enabled = false
89 | tags = local.tags
90 | public_subnets_additional_tags = local.public_subnets_additional_tags
91 | private_subnets_enabled = true
92 | private_subnets_additional_tags = local.private_subnets_additional_tags
93 | 
94 | context = module.this.context
95 | }
96 |
# The module under test: the root module of this repository.
97 | module "eks_cluster" {
98 | source = "../../"
99 | 
100 | subnet_ids = concat(module.subnets.private_subnet_ids, module.subnets.public_subnet_ids)
101 | kubernetes_version = var.kubernetes_version
102 | oidc_provider_enabled = var.oidc_provider_enabled
103 | enabled_cluster_log_types = var.enabled_cluster_log_types
104 | cluster_log_retention_period = var.cluster_log_retention_period
105 | 
106 | cluster_encryption_config_enabled = var.cluster_encryption_config_enabled
107 | cluster_encryption_config_kms_key_id = var.cluster_encryption_config_kms_key_id
108 | cluster_encryption_config_kms_key_enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation
109 | cluster_encryption_config_kms_key_deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days
110 | cluster_encryption_config_kms_key_policy = var.cluster_encryption_config_kms_key_policy
111 | cluster_encryption_config_resources = var.cluster_encryption_config_resources
112 | 
# Install addons only after the node group exists — presumably because some addons
# need worker nodes available before they can become healthy; confirm in module docs.
113 | addons = local.addons
114 | addons_depends_on = [module.eks_node_group]
115 | bootstrap_self_managed_addons_enabled = var.bootstrap_self_managed_addons_enabled
116 | upgrade_policy = var.upgrade_policy
117 | zonal_shift_config = var.zonal_shift_config
118 | 
# Use the EKS access-entry API exclusively (no aws-auth ConfigMap) and disable the
# automatic cluster-creator admin entry; admin access is granted explicitly via
# local.access_entry_map instead.
119 | access_entry_map = local.access_entry_map
120 | access_config = {
121 | authentication_mode = "API"
122 | bootstrap_cluster_creator_admin_permissions = false
123 | }
124 | 
125 | # This is to test `allowed_security_group_ids` and `allowed_cidr_blocks`
126 | # In a real cluster, these should be some other (existing) Security Groups and CIDR blocks to allow access to the cluster
127 | allowed_security_group_ids = [module.vpc.vpc_default_security_group_id]
128 | allowed_cidr_blocks = [module.vpc.vpc_cidr_block]
129 | 
130 | kubernetes_network_ipv6_enabled = local.private_ipv6_enabled
131 | 
132 | context = module.this.context
133 | 
134 | cluster_depends_on = [module.subnets]
135 | }
136 |
# EKS managed node group providing worker nodes for the example cluster.
137 | module "eks_node_group" {
138 | source = "cloudposse/eks-node-group/aws"
139 | version = "3.2.0"
140 | 
141 | # node group <= 3.2 requires a non-empty list of subnet_ids, even when disabled
142 | subnet_ids = local.enabled ? module.subnets.public_subnet_ids : ["filler_string_for_enabled_is_false"]
143 | cluster_name = module.eks_cluster.eks_cluster_id
144 | instance_types = var.instance_types
145 | desired_size = var.desired_size
146 | min_size = var.min_size
147 | max_size = var.max_size
148 | kubernetes_labels = var.kubernetes_labels
149 | 
150 | # Test default of using cluster's version, but when disabled node group <= 3.2.0 requires kubernetes_version be supplied
# NOTE(review): the disabled branch wraps the version in a list — presumably the node
# group module's kubernetes_version input is list(string); confirm against its docs.
151 | kubernetes_version = local.enabled ? null : [var.kubernetes_version]
152 | 
153 | context = module.this.context
154 | }
155 |
--------------------------------------------------------------------------------
/examples/complete/outputs.tf:
--------------------------------------------------------------------------------
1 | output "public_subnet_cidrs" {
2 | value = module.subnets.public_subnet_cidrs
3 | description = "Public subnet CIDRs"
4 | }
5 |
6 | output "private_subnet_cidrs" {
7 | value = module.subnets.private_subnet_cidrs
8 | description = "Private subnet CIDRs"
9 | }
10 |
11 | output "vpc_cidr" {
12 |   value       = module.vpc.vpc_cidr_block
13 |   description = "The CIDR block of the VPC"
14 | }
15 |
16 | output "eks_cluster_id" {
17 | description = "The name of the cluster"
18 | value = module.eks_cluster.eks_cluster_id
19 | }
20 |
21 | output "eks_cluster_arn" {
22 | description = "The Amazon Resource Name (ARN) of the cluster"
23 | value = module.eks_cluster.eks_cluster_arn
24 | }
25 |
26 | output "eks_cluster_endpoint" {
27 | description = "The endpoint for the Kubernetes API server"
28 | value = module.eks_cluster.eks_cluster_endpoint
29 | }
30 |
31 | output "eks_cluster_version" {
32 | description = "The Kubernetes server version of the cluster"
33 | value = module.eks_cluster.eks_cluster_version
34 | }
35 |
36 | output "eks_cluster_identity_oidc_issuer" {
37 | description = "The OIDC Identity issuer for the cluster"
38 | value = module.eks_cluster.eks_cluster_identity_oidc_issuer
39 | }
40 |
41 | output "eks_cluster_managed_security_group_id" {
42 | description = "Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads"
43 | value = module.eks_cluster.eks_cluster_managed_security_group_id
44 | }
45 |
46 | output "eks_cluster_ipv6_service_cidr" {
47 | description = <<-EOT
48 | The IPv6 CIDR block that Kubernetes pod and service IP addresses are assigned from
49 | if `kubernetes_network_ipv6_enabled` is set to true. If set to false this output will be null.
50 | EOT
51 | value = module.eks_cluster.eks_cluster_ipv6_service_cidr
52 | }
53 |
54 | output "eks_node_group_role_arn" {
55 | description = "ARN of the worker nodes IAM role"
56 | value = module.eks_node_group.eks_node_group_role_arn
57 | }
58 |
59 | output "eks_node_group_role_name" {
60 | description = "Name of the worker nodes IAM role"
61 | value = module.eks_node_group.eks_node_group_role_name
62 | }
63 |
64 | output "eks_node_group_id" {
65 | description = "EKS Cluster name and EKS Node Group name separated by a colon"
66 | value = module.eks_node_group.eks_node_group_id
67 | }
68 |
69 | output "eks_node_group_arn" {
70 | description = "Amazon Resource Name (ARN) of the EKS Node Group"
71 | value = module.eks_node_group.eks_node_group_arn
72 | }
73 |
74 | output "eks_node_group_resources" {
75 | description = "List of objects containing information about underlying resources of the EKS Node Group"
76 | value = module.eks_node_group.eks_node_group_resources
77 | }
78 |
79 | output "eks_node_group_status" {
80 | description = "Status of the EKS Node Group"
81 | value = module.eks_node_group.eks_node_group_status
82 | }
83 |
--------------------------------------------------------------------------------
/examples/complete/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | type = string
3 | description = "AWS Region"
4 | }
5 |
6 | variable "availability_zones" {
7 | type = list(string)
8 | description = "List of availability zones"
9 | }
10 |
11 | variable "kubernetes_version" {
12 | type = string
13 | default = "1.29"
14 | description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used"
15 | }
16 |
17 | variable "enabled_cluster_log_types" {
18 | type = list(string)
19 | default = []
20 | description = "A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]"
21 | }
22 |
23 | variable "cluster_log_retention_period" {
24 | type = number
25 | default = 0
26 | description = "Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html."
27 | }
28 |
29 | variable "oidc_provider_enabled" {
30 | type = bool
31 | default = true
32 | description = "Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using `kiam` or `kube2iam`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html"
33 | }
34 |
35 | variable "instance_types" {
36 | type = list(string)
37 | description = "Set of instance types associated with the EKS Node Group. Defaults to [\"t3.medium\"]. Terraform will only perform drift detection if a configuration value is provided"
38 | }
39 |
40 | variable "kubernetes_labels" {
41 | type = map(string)
42 | description = "Key-value mapping of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed"
43 | default = {}
44 | }
45 |
46 | variable "desired_size" {
47 | type = number
48 | description = "Desired number of worker nodes"
49 | }
50 |
51 | variable "max_size" {
52 | type = number
53 | description = "The maximum size of the AutoScaling Group"
54 | }
55 |
56 | variable "min_size" {
57 | type = number
58 | description = "The minimum size of the AutoScaling Group"
59 | }
60 |
61 | variable "cluster_encryption_config_enabled" {
62 | type = bool
63 | default = true
64 | description = "Set to `true` to enable Cluster Encryption Configuration"
65 | }
66 |
67 | variable "cluster_encryption_config_kms_key_id" {
68 | type = string
69 | default = ""
70 | description = "KMS Key ID to use for cluster encryption config"
71 | }
72 |
73 | variable "cluster_encryption_config_kms_key_enable_key_rotation" {
74 | type = bool
75 | default = true
76 | description = "Cluster Encryption Config KMS Key Resource argument - enable kms key rotation"
77 | }
78 |
79 | variable "cluster_encryption_config_kms_key_deletion_window_in_days" {
80 | type = number
81 | default = 10
82 | description = "Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction"
83 | }
84 |
85 | variable "cluster_encryption_config_kms_key_policy" {
86 | type = string
87 | default = null
88 | description = "Cluster Encryption Config KMS Key Resource argument - key policy"
89 | }
90 |
91 | variable "cluster_encryption_config_resources" {
92 | type = list(any)
93 | default = ["secrets"]
94 | description = "Cluster Encryption Config Resources to encrypt, e.g. ['secrets']"
95 | }
96 |
97 | variable "addons" {
98 | type = list(object({
99 | addon_name = string
100 | addon_version = string
101 | # resolve_conflicts is deprecated, but we keep it for backwards compatibility
102 | # and because if not declared, Terraform will silently ignore it.
103 | resolve_conflicts = optional(string, null)
104 | resolve_conflicts_on_create = optional(string, null)
105 | resolve_conflicts_on_update = optional(string, null)
106 | service_account_role_arn = string
107 | }))
108 | default = []
109 | description = "Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources."
110 | }
111 |
112 | variable "bootstrap_self_managed_addons_enabled" {
113 | description = "Manages bootstrap of default networking addons after cluster has been created"
114 | type = bool
115 | default = null
116 | }
117 |
118 | variable "upgrade_policy" {
119 | type = object({
120 | support_type = optional(string, null)
121 | })
122 | description = "Configuration block for the support policy to use for the cluster"
123 | default = null
124 | }
125 |
126 | variable "zonal_shift_config" {
127 | type = object({
128 | enabled = optional(bool, null)
129 | })
130 | description = "Configuration block with zonal shift configuration for the cluster"
131 | default = null
132 | }
133 |
134 | variable "private_ipv6_enabled" {
135 | type = bool
136 | default = false
137 | description = "Whether to use IPv6 addresses for the pods in the node group"
138 | }
139 |
--------------------------------------------------------------------------------
/examples/complete/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_version = ">= 1.3.0" # optional() object type attributes (used in variables.tf) require Terraform >= 1.3
3 |
4 |   required_providers {
5 |     aws = {
6 |       source  = "hashicorp/aws"
7 |       version = ">= 5.74"
8 |     }
9 |     kubernetes = {
10 |       source  = "hashicorp/kubernetes"
11 |       version = ">= 2.7.1"
12 |     }
13 |     tls = {
14 |       source  = "hashicorp/tls"
15 |       version = ">= 3.1.0"
16 |     }
17 |     null = {
18 |       source  = "hashicorp/null"
19 |       version = ">= 2.0"
20 |     }
21 |   }
22 | }
23 |
--------------------------------------------------------------------------------
/examples/complete/vpc-cni.tf:
--------------------------------------------------------------------------------
1 | # `vpc-cni` EKS addon
2 | # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html
3 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html
4 | # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-role
5 | # https://aws.github.io/aws-eks-best-practices/networking/vpc-cni/#deploy-vpc-cni-managed-add-on
6 |
7 | locals {
8 |   vpc_cni_sa_needed = local.enabled # gates the VPC CNI IAM policy document, role, and policy attachment below
9 | }
10 |
11 | # It is important to enable IPv6 support for the VPC CNI plugin
12 | # even if IPv6 is not in use, because the addon may need to
13 | # manage IPv6 addresses during a transition from IPv6 to IPv4
14 | # or vice versa, or while destroying the cluster.
15 | data "aws_iam_policy_document" "vpc_cni_ipv6" {
16 |   count = local.vpc_cni_sa_needed ? 1 : 0
17 |
18 |   # See https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy
19 |   statement {
20 |     sid       = ""
21 |     effect    = "Allow"
22 |     resources = ["*"]
23 |
24 |     actions = [
25 |       "ec2:AssignIpv6Addresses",
26 |       "ec2:DescribeInstances",
27 |       "ec2:DescribeTags",
28 |       "ec2:DescribeNetworkInterfaces",
29 |       "ec2:DescribeInstanceTypes"
30 |     ]
31 |   }
32 |
33 |   statement { # grants ec2:CreateTags on network interfaces only
34 |     sid       = ""
35 |     effect    = "Allow"
36 |     resources = ["arn:aws:ec2:*:*:network-interface/*"] # NOTE(review): partition hardcoded to "aws"; use data.aws_partition if GovCloud/China support is needed
37 |     actions   = ["ec2:CreateTags"]
38 |   }
39 | }
40 |
41 | resource "aws_iam_role_policy_attachment" "vpc_cni" {
42 |   count = local.vpc_cni_sa_needed ? 1 : 0
43 |
44 |   role       = module.vpc_cni_eks_iam_role.service_account_role_name
45 |   policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" # AWS-managed policy; NOTE(review): partition hardcoded to "aws" (not GovCloud/China compatible)
46 | }
47 |
48 | module "vpc_cni_eks_iam_role" {
49 |   source  = "cloudposse/eks-iam-role/aws"
50 |   version = "2.1.1"
51 |
52 |   enabled = local.vpc_cni_sa_needed
53 |
54 |   eks_cluster_oidc_issuer_url = module.eks_cluster.eks_cluster_identity_oidc_issuer
55 |
56 |   service_account_name = "aws-node"
57 |   service_account_namespace = "kube-system"
58 |
59 |   aws_iam_policy_document = try([data.aws_iam_policy_document.vpc_cni_ipv6[0].json], []) # try() falls back to [] when the data source has count = 0 (module disabled)
60 |
61 |   context = module.this.context
62 | }
63 |
--------------------------------------------------------------------------------
/examples/obsolete-version2/context.tf:
--------------------------------------------------------------------------------
1 | #
2 | # ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label
3 | # All other instances of this file should be a copy of that one
4 | #
5 | #
6 | # Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf
7 | # and then place it in your Terraform module to automatically get
8 | # Cloud Posse's standard configuration inputs suitable for passing
9 | # to Cloud Posse modules.
10 | #
11 | # curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf
12 | #
13 | # Modules should access the whole context as `module.this.context`
14 | # to get the input variables with nulls for defaults,
15 | # for example `context = module.this.context`,
16 | # and access individual variables as `module.this.`,
17 | # with final values filled in.
18 | #
19 | # For example, when using defaults, `module.this.context.delimiter`
20 | # will be null, and `module.this.delimiter` will be `-` (hyphen).
21 | #
22 |
23 | module "this" {
24 | source = "cloudposse/label/null"
25 | version = "0.25.0" # requires Terraform >= 0.13.0
26 |
27 | enabled = var.enabled
28 | namespace = var.namespace
29 | tenant = var.tenant
30 | environment = var.environment
31 | stage = var.stage
32 | name = var.name
33 | delimiter = var.delimiter
34 | attributes = var.attributes
35 | tags = var.tags
36 | additional_tag_map = var.additional_tag_map
37 | label_order = var.label_order
38 | regex_replace_chars = var.regex_replace_chars
39 | id_length_limit = var.id_length_limit
40 | label_key_case = var.label_key_case
41 | label_value_case = var.label_value_case
42 | descriptor_formats = var.descriptor_formats
43 | labels_as_tags = var.labels_as_tags
44 |
45 | context = var.context
46 | }
47 |
48 | # Copy contents of cloudposse/terraform-null-label/variables.tf here
49 |
50 | variable "context" {
51 | type = any
52 | default = {
53 | enabled = true
54 | namespace = null
55 | tenant = null
56 | environment = null
57 | stage = null
58 | name = null
59 | delimiter = null
60 | attributes = []
61 | tags = {}
62 | additional_tag_map = {}
63 | regex_replace_chars = null
64 | label_order = []
65 | id_length_limit = null
66 | label_key_case = null
67 | label_value_case = null
68 | descriptor_formats = {}
69 | # Note: we have to use [] instead of null for unset lists due to
70 | # https://github.com/hashicorp/terraform/issues/28137
71 | # which was not fixed until Terraform 1.0.0,
72 | # but we want the default to be all the labels in `label_order`
73 | # and we want users to be able to prevent all tag generation
74 | # by setting `labels_as_tags` to `[]`, so we need
75 | # a different sentinel to indicate "default"
76 | labels_as_tags = ["unset"]
77 | }
78 | description = <<-EOT
79 | Single object for setting entire context at once.
80 | See description of individual variables for details.
81 | Leave string and numeric variables as `null` to use default value.
82 | Individual variable settings (non-null) override settings in context object,
83 | except for attributes, tags, and additional_tag_map, which are merged.
84 | EOT
85 |
86 | validation {
87 | condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
88 | error_message = "Allowed values: `lower`, `title`, `upper`."
89 | }
90 |
91 | validation {
92 | condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
93 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
94 | }
95 | }
96 |
97 | variable "enabled" {
98 | type = bool
99 | default = null
100 | description = "Set to false to prevent the module from creating any resources"
101 | }
102 |
103 | variable "namespace" {
104 | type = string
105 | default = null
106 | description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique"
107 | }
108 |
109 | variable "tenant" {
110 | type = string
111 | default = null
112 | description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for"
113 | }
114 |
115 | variable "environment" {
116 | type = string
117 | default = null
118 | description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'"
119 | }
120 |
121 | variable "stage" {
122 | type = string
123 | default = null
124 | description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'"
125 | }
126 |
127 | variable "name" {
128 | type = string
129 | default = null
130 | description = <<-EOT
131 | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
132 | This is the only ID element not also included as a `tag`.
133 | The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
134 | EOT
135 | }
136 |
137 | variable "delimiter" {
138 | type = string
139 | default = null
140 | description = <<-EOT
141 | Delimiter to be used between ID elements.
142 | Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
143 | EOT
144 | }
145 |
146 | variable "attributes" {
147 | type = list(string)
148 | default = []
149 | description = <<-EOT
150 | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
151 | in the order they appear in the list. New attributes are appended to the
152 | end of the list. The elements of the list are joined by the `delimiter`
153 | and treated as a single ID element.
154 | EOT
155 | }
156 |
157 | variable "labels_as_tags" {
158 | type = set(string)
159 | default = ["default"]
160 | description = <<-EOT
161 | Set of labels (ID elements) to include as tags in the `tags` output.
162 | Default is to include all labels.
163 | Tags with empty values will not be included in the `tags` output.
164 | Set to `[]` to suppress all generated tags.
165 | **Notes:**
166 | The value of the `name` tag, if included, will be the `id`, not the `name`.
167 | Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
168 | changed in later chained modules. Attempts to change it will be silently ignored.
169 | EOT
170 | }
171 |
172 | variable "tags" {
173 | type = map(string)
174 | default = {}
175 | description = <<-EOT
176 | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
177 | Neither the tag keys nor the tag values will be modified by this module.
178 | EOT
179 | }
180 |
181 | variable "additional_tag_map" {
182 | type = map(string)
183 | default = {}
184 | description = <<-EOT
185 | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
186 | This is for some rare cases where resources want additional configuration of tags
187 | and therefore take a list of maps with tag key, value, and additional configuration.
188 | EOT
189 | }
190 |
191 | variable "label_order" {
192 | type = list(string)
193 | default = null
194 | description = <<-EOT
195 | The order in which the labels (ID elements) appear in the `id`.
196 | Defaults to ["namespace", "environment", "stage", "name", "attributes"].
197 | You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present.
198 | EOT
199 | }
200 |
201 | variable "regex_replace_chars" {
202 | type = string
203 | default = null
204 | description = <<-EOT
205 | Terraform regular expression (regex) string.
206 | Characters matching the regex will be removed from the ID elements.
207 | If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
208 | EOT
209 | }
210 |
211 | variable "id_length_limit" {
212 | type = number
213 | default = null
214 | description = <<-EOT
215 | Limit `id` to this many characters (minimum 6).
216 | Set to `0` for unlimited length.
217 | Set to `null` for keep the existing setting, which defaults to `0`.
218 | Does not affect `id_full`.
219 | EOT
220 | validation {
221 | condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
222 | error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
223 | }
224 | }
225 |
226 | variable "label_key_case" {
227 | type = string
228 | default = null
229 | description = <<-EOT
230 | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
231 | Does not affect keys of tags passed in via the `tags` input.
232 | Possible values: `lower`, `title`, `upper`.
233 | Default value: `title`.
234 | EOT
235 |
236 | validation {
237 | condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
238 | error_message = "Allowed values: `lower`, `title`, `upper`."
239 | }
240 | }
241 |
242 | variable "label_value_case" {
243 | type = string
244 | default = null
245 | description = <<-EOT
246 | Controls the letter case of ID elements (labels) as included in `id`,
247 | set as tag values, and output by this module individually.
248 | Does not affect values of tags passed in via the `tags` input.
249 | Possible values: `lower`, `title`, `upper` and `none` (no transformation).
250 | Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
251 | Default value: `lower`.
252 | EOT
253 |
254 | validation {
255 | condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
256 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
257 | }
258 | }
259 |
260 | variable "descriptor_formats" {
261 | type = any
262 | default = {}
263 | description = <<-EOT
264 | Describe additional descriptors to be output in the `descriptors` output map.
265 | Map of maps. Keys are names of descriptors. Values are maps of the form
266 | `{
267 | format = string
268 | labels = list(string)
269 | }`
270 | (Type is `any` so the map values can later be enhanced to provide additional options.)
271 | `format` is a Terraform format string to be passed to the `format()` function.
272 | `labels` is a list of labels, in order, to pass to `format()` function.
273 | Label values will be normalized before being passed to `format()` so they will be
274 | identical to how they appear in `id`.
275 | Default is `{}` (`descriptors` output will be empty).
276 | EOT
277 | }
278 |
279 | #### End of copy of cloudposse/terraform-null-label/variables.tf
280 |
--------------------------------------------------------------------------------
/examples/obsolete-version2/fixtures.us-east-2.tfvars:
--------------------------------------------------------------------------------
1 | region = "us-east-2"
2 |
3 | availability_zones = ["us-east-2a", "us-east-2b"]
4 |
5 | namespace = "eg"
6 |
7 | stage = "test"
8 |
9 | name = "eks"
10 |
11 | # oidc_provider_enabled is required to be true for VPC CNI addon
12 | oidc_provider_enabled = true
13 |
14 | enabled_cluster_log_types = ["audit"]
15 |
16 | cluster_log_retention_period = 7
17 |
18 | instance_types = ["t3.small"]
19 |
20 | desired_size = 2
21 |
22 | max_size = 3
23 |
24 | min_size = 2
25 |
26 | kubernetes_labels = {}
27 |
28 | cluster_encryption_config_enabled = true
29 |
30 | # When updating the Kubernetes version, also update the API and client-go version in test/src/go.mod
31 | kubernetes_version = "1.26"
32 |
33 | addons = [
34 | // https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html#vpc-cni-latest-available-version
35 | {
36 | addon_name = "vpc-cni"
37 | addon_version = null
38 | resolve_conflicts = "OVERWRITE"
39 | service_account_role_arn = null
40 | },
41 | // https://docs.aws.amazon.com/eks/latest/userguide/managing-kube-proxy.html
42 | {
43 | addon_name = "kube-proxy"
44 | addon_version = null
45 | resolve_conflicts = "OVERWRITE"
46 | service_account_role_arn = null
47 | },
48 | // https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html
49 | {
50 | addon_name = "coredns"
51 | addon_version = null
52 | resolve_conflicts = "OVERWRITE"
53 | service_account_role_arn = null
54 | },
55 | ]
56 |
--------------------------------------------------------------------------------
/examples/obsolete-version2/main.tf:
--------------------------------------------------------------------------------
1 |
2 | ##################################################################
3 | ## This example shows how to use version 2.9.0 of this module. ##
4 | ## It is provided for reference only. ##
5 | ## Use version 4.0.0 or later for new deployments. ##
6 | ##################################################################
7 |
8 | provider "aws" {
9 | region = var.region
10 | }
11 |
12 | module "label" {
13 | source = "cloudposse/label/null"
14 | version = "0.25.0"
15 |
16 | attributes = ["cluster"]
17 |
18 | context = module.this.context
19 | }
20 |
21 | locals {
22 | # The usage of the specific kubernetes.io/cluster/* resource tags below are required
23 | # for EKS and Kubernetes to discover and manage networking resources
24 | # https://aws.amazon.com/premiumsupport/knowledge-center/eks-vpc-subnet-discovery/
25 | # https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/deploy/subnet_discovery.md
26 | tags = { "kubernetes.io/cluster/${module.label.id}" = "shared" }
27 |
28 | # required tags to make ALB ingress work https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
29 | public_subnets_additional_tags = {
30 | "kubernetes.io/role/elb" : 1
31 | }
32 | private_subnets_additional_tags = {
33 | "kubernetes.io/role/internal-elb" : 1
34 | }
35 | }
36 |
37 | module "vpc" {
38 | source = "cloudposse/vpc/aws"
39 | version = "2.1.0"
40 |
41 | ipv4_primary_cidr_block = "172.16.0.0/16"
42 | tags = local.tags
43 |
44 | context = module.this.context
45 | }
46 |
47 | module "subnets" {
48 | source = "cloudposse/dynamic-subnets/aws"
49 | version = "2.3.0"
50 |
51 | availability_zones = var.availability_zones
52 | vpc_id = module.vpc.vpc_id
53 | igw_id = [module.vpc.igw_id]
54 | ipv4_cidr_block = [module.vpc.vpc_cidr_block]
55 | max_nats = 1
56 | nat_gateway_enabled = true
57 | nat_instance_enabled = false
58 | tags = local.tags
59 | public_subnets_additional_tags = local.public_subnets_additional_tags
60 | private_subnets_additional_tags = local.private_subnets_additional_tags
61 |
62 | context = module.this.context
63 | }
64 |
65 | module "eks_cluster" {
66 | source = "cloudposse/eks-cluster/aws"
67 | version = "2.9.0"
68 |
69 | vpc_id = module.vpc.vpc_id
70 | subnet_ids = concat(module.subnets.private_subnet_ids, module.subnets.public_subnet_ids)
71 | kubernetes_version = var.kubernetes_version
72 | local_exec_interpreter = var.local_exec_interpreter
73 | oidc_provider_enabled = var.oidc_provider_enabled
74 | enabled_cluster_log_types = var.enabled_cluster_log_types
75 | cluster_log_retention_period = var.cluster_log_retention_period
76 |
77 | cluster_encryption_config_enabled = var.cluster_encryption_config_enabled
78 | cluster_encryption_config_kms_key_id = var.cluster_encryption_config_kms_key_id
79 | cluster_encryption_config_kms_key_enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation
80 | cluster_encryption_config_kms_key_deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days
81 | cluster_encryption_config_kms_key_policy = var.cluster_encryption_config_kms_key_policy
82 | cluster_encryption_config_resources = var.cluster_encryption_config_resources
83 |
84 | addons = var.addons
85 | addons_depends_on = [module.eks_node_group]
86 |
87 | # We need to create a new Security Group only if the EKS cluster is used with unmanaged worker nodes.
88 | # EKS creates a managed Security Group for the cluster automatically, places the control plane and managed nodes into the security group,
89 | # and allows all communications between the control plane and the managed worker nodes
90 | # (EKS applies it to ENIs that are attached to EKS Control Plane master nodes and to any managed workloads).
91 | # If only Managed Node Groups are used, we don't need to create a separate Security Group;
92 | # otherwise we place the cluster in two SGs - one that is created by EKS, the other one that the module creates.
93 | # See https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html for more details.
94 | create_security_group = false
95 |
96 | # This is to test `allowed_security_group_ids` and `allowed_cidr_blocks`
97 | # In a real cluster, these should be some other (existing) Security Groups and CIDR blocks to allow access to the cluster
98 | allowed_security_group_ids = [module.vpc.vpc_default_security_group_id]
99 | allowed_cidr_blocks = [module.vpc.vpc_cidr_block]
100 |
101 | # For manual testing. In particular, set `false` if local configuration/state
102 | # has a cluster but the cluster was deleted by nightly cleanup, in order for
103 | # `terraform destroy` to succeed.
104 | apply_config_map_aws_auth = var.apply_config_map_aws_auth
105 |
106 | context = module.this.context
107 |
108 | cluster_depends_on = [module.subnets]
109 | }
110 |
111 | module "eks_node_group" {
112 | source = "cloudposse/eks-node-group/aws"
113 | version = "2.4.0"
114 |
115 | subnet_ids = module.subnets.private_subnet_ids
116 | cluster_name = module.eks_cluster.eks_cluster_id
117 | instance_types = var.instance_types
118 | desired_size = var.desired_size
119 | min_size = var.min_size
120 | max_size = var.max_size
121 | kubernetes_labels = var.kubernetes_labels
122 |
123 | # Prevent the node groups from being created before the Kubernetes aws-auth ConfigMap
124 | module_depends_on = module.eks_cluster.kubernetes_config_map_id
125 |
126 | context = module.this.context
127 | }
128 |
--------------------------------------------------------------------------------
/examples/obsolete-version2/outputs.tf:
--------------------------------------------------------------------------------
1 | output "public_subnet_cidrs" {
2 | value = module.subnets.public_subnet_cidrs
3 | description = "Public subnet CIDRs"
4 | }
5 |
6 | output "private_subnet_cidrs" {
7 | value = module.subnets.private_subnet_cidrs
8 | description = "Private subnet CIDRs"
9 | }
10 |
11 | output "vpc_cidr" {
12 |   value       = module.vpc.vpc_cidr_block
13 |   description = "The CIDR block of the VPC"
14 | }
15 |
16 | output "eks_cluster_id" {
17 | description = "The name of the cluster"
18 | value = module.eks_cluster.eks_cluster_id
19 | }
20 |
21 | output "eks_cluster_arn" {
22 | description = "The Amazon Resource Name (ARN) of the cluster"
23 | value = module.eks_cluster.eks_cluster_arn
24 | }
25 |
26 | output "eks_cluster_endpoint" {
27 | description = "The endpoint for the Kubernetes API server"
28 | value = module.eks_cluster.eks_cluster_endpoint
29 | }
30 |
31 | output "eks_cluster_version" {
32 | description = "The Kubernetes server version of the cluster"
33 | value = module.eks_cluster.eks_cluster_version
34 | }
35 |
36 | output "eks_cluster_identity_oidc_issuer" {
37 | description = "The OIDC Identity issuer for the cluster"
38 | value = module.eks_cluster.eks_cluster_identity_oidc_issuer
39 | }
40 |
41 | output "eks_cluster_managed_security_group_id" {
42 | description = "Security Group ID that was created by EKS for the cluster. EKS creates a Security Group and applies it to ENI that is attached to EKS Control Plane master nodes and to any managed workloads"
43 | value = module.eks_cluster.eks_cluster_managed_security_group_id
44 | }
45 |
46 | output "eks_node_group_role_arn" {
47 | description = "ARN of the worker nodes IAM role"
48 | value = module.eks_node_group.eks_node_group_role_arn
49 | }
50 |
51 | output "eks_node_group_role_name" {
52 | description = "Name of the worker nodes IAM role"
53 | value = module.eks_node_group.eks_node_group_role_name
54 | }
55 |
56 | output "eks_node_group_id" {
57 | description = "EKS Cluster name and EKS Node Group name separated by a colon"
58 | value = module.eks_node_group.eks_node_group_id
59 | }
60 |
61 | output "eks_node_group_arn" {
62 | description = "Amazon Resource Name (ARN) of the EKS Node Group"
63 | value = module.eks_node_group.eks_node_group_arn
64 | }
65 |
66 | output "eks_node_group_resources" {
67 | description = "List of objects containing information about underlying resources of the EKS Node Group"
68 | value = module.eks_node_group.eks_node_group_resources
69 | }
70 |
71 | output "eks_node_group_status" {
72 | description = "Status of the EKS Node Group"
73 | value = module.eks_node_group.eks_node_group_status
74 | }
75 |
--------------------------------------------------------------------------------
/examples/obsolete-version2/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | type = string
3 | description = "AWS Region"
4 | }
5 |
6 | variable "availability_zones" {
7 | type = list(string)
8 | description = "List of availability zones"
9 | }
10 |
11 | variable "kubernetes_version" {
12 | type = string
13 | default = "1.21"
14 | description = "Desired Kubernetes master version. If you do not specify a value, the latest available version is used"
15 | }
16 |
17 | variable "enabled_cluster_log_types" {
18 | type = list(string)
19 | default = []
20 | description = "A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]"
21 | }
22 |
23 | variable "cluster_log_retention_period" {
24 | type = number
25 | default = 0
26 | description = "Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html."
27 | }
28 |
29 | variable "map_additional_aws_accounts" {
30 | description = "Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap"
31 | type = list(string)
32 | default = []
33 | }
34 |
35 | variable "map_additional_iam_roles" {
36 | description = "Additional IAM roles to add to `config-map-aws-auth` ConfigMap"
37 |
38 | type = list(object({
39 | rolearn = string
40 | username = string
41 | groups = list(string)
42 | }))
43 |
44 | default = []
45 | }
46 |
47 | variable "map_additional_iam_users" {
48 | description = "Additional IAM users to add to `config-map-aws-auth` ConfigMap"
49 |
50 | type = list(object({
51 | userarn = string
52 | username = string
53 | groups = list(string)
54 | }))
55 |
56 | default = []
57 | }
58 |
59 | variable "oidc_provider_enabled" {
60 | type = bool
61 | default = true
62 | description = "Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a service account in the cluster, instead of using `kiam` or `kube2iam`. For more information, see https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html"
63 | }
64 |
65 | variable "local_exec_interpreter" {
66 | type = list(string)
67 | default = ["/bin/sh", "-c"]
68 | description = "shell to use for local_exec"
69 | }
70 |
71 | variable "instance_types" {
72 | type = list(string)
73 | description = "Set of instance types associated with the EKS Node Group. Defaults to [\"t3.medium\"]. Terraform will only perform drift detection if a configuration value is provided"
74 | }
75 |
76 | variable "kubernetes_labels" {
77 | type = map(string)
78 | description = "Key-value mapping of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed"
79 | default = {}
80 | }
81 |
82 | variable "desired_size" {
83 | type = number
84 | description = "Desired number of worker nodes"
85 | }
86 |
87 | variable "max_size" {
88 | type = number
89 | description = "The maximum size of the AutoScaling Group"
90 | }
91 |
92 | variable "min_size" {
93 | type = number
94 | description = "The minimum size of the AutoScaling Group"
95 | }
96 |
97 | variable "cluster_encryption_config_enabled" {
98 | type = bool
99 | default = true
100 | description = "Set to `true` to enable Cluster Encryption Configuration"
101 | }
102 |
103 | variable "cluster_encryption_config_kms_key_id" {
104 | type = string
105 | default = ""
106 | description = "KMS Key ID to use for cluster encryption config"
107 | }
108 |
109 | variable "cluster_encryption_config_kms_key_enable_key_rotation" {
110 | type = bool
111 | default = true
112 | description = "Cluster Encryption Config KMS Key Resource argument - enable kms key rotation"
113 | }
114 |
115 | variable "cluster_encryption_config_kms_key_deletion_window_in_days" {
116 | type = number
117 | default = 10
118 | description = "Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction"
119 | }
120 |
121 | variable "cluster_encryption_config_kms_key_policy" {
122 | type = string
123 | default = null
124 | description = "Cluster Encryption Config KMS Key Resource argument - key policy"
125 | }
126 |
127 | variable "cluster_encryption_config_resources" {
128 | type = list(any)
129 | default = ["secrets"]
130 | description = "Cluster Encryption Config Resources to encrypt, e.g. ['secrets']"
131 | }
132 |
133 | variable "addons" {
134 | type = list(object({
135 | addon_name = string
136 | addon_version = string
137 | resolve_conflicts = string
138 | service_account_role_arn = string
139 | }))
140 | default = []
141 | description = "Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources."
142 | }
143 |
144 | variable "apply_config_map_aws_auth" {
145 |   type        = bool
146 |   default     = true
147 |   description = "Whether to apply the ConfigMap to allow worker nodes to join the EKS cluster and allow additional users, accounts and roles to access the cluster"
148 | }
149 |
--------------------------------------------------------------------------------
/examples/obsolete-version2/versions.tf:
--------------------------------------------------------------------------------
1 | # Because this module is an historical example, not a living one,
2 | # we are pinning provider versions to historical versions that
3 | # are compatible with the module code.
4 | # We ordinarily do not recommend pinning versions in this way.
5 |
6 | terraform {
7 | required_version = ">= 1.3.0, < 1.6.0"
8 |
9 | required_providers {
10 | aws = {
11 | source = "hashicorp/aws"
12 | # Version 5.0.0 introduced a lot of changes.
13 | version = ">= 3.38, < 5.0.0"
14 | }
15 | kubernetes = {
16 | source = "hashicorp/kubernetes"
17 | # Version 2.25.0 introduced a breaking change.
18 | version = ">= 2.7.1, <= 2.24.0"
19 | }
20 | tls = {
21 | source = "hashicorp/tls"
22 | version = "> 3.1, < 5.0"
23 | }
24 | null = {
25 | source = "hashicorp/null"
26 | version = "> 2.0, < 4.0"
27 | }
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/iam.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | create_eks_service_role = local.enabled && var.create_eks_service_role
3 |
4 | eks_service_role_arn = local.create_eks_service_role ? one(aws_iam_role.default[*].arn) : var.eks_cluster_service_role_arn
5 | }
6 |
7 | data "aws_iam_policy_document" "assume_role" {
8 | count = local.create_eks_service_role ? 1 : 0
9 |
10 | statement {
11 | effect = "Allow"
12 | actions = ["sts:AssumeRole"]
13 |
14 | principals {
15 | type = "Service"
16 | identifiers = ["eks.amazonaws.com"]
17 | }
18 | }
19 | }
20 |
21 | resource "aws_iam_role" "default" {
22 | count = local.create_eks_service_role ? 1 : 0
23 |
24 | name = module.label.id
25 | assume_role_policy = one(data.aws_iam_policy_document.assume_role[*].json)
26 | tags = module.label.tags
27 | permissions_boundary = var.permissions_boundary
28 | }
29 |
30 | resource "aws_iam_role_policy_attachment" "amazon_eks_cluster_policy" {
31 | count = local.create_eks_service_role ? 1 : 0
32 |
33 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSClusterPolicy", one(data.aws_partition.current[*].partition))
34 | role = one(aws_iam_role.default[*].name)
35 | }
36 |
37 | resource "aws_iam_role_policy_attachment" "amazon_eks_service_policy" {
38 | count = local.create_eks_service_role ? 1 : 0
39 |
40 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEKSServicePolicy", one(data.aws_partition.current[*].partition))
41 | role = one(aws_iam_role.default[*].name)
42 | }
43 |
44 | # AmazonEKSClusterPolicy managed policy doesn't contain all necessary permissions to create
45 | # ELB service-linked role required during LB provisioning by Kubernetes.
46 | # Because of that, on a new AWS account (where load balancers have not been provisioned yet), `nginx-ingress` fails to provision a load balancer
47 |
48 | data "aws_iam_policy_document" "cluster_elb_service_role" {
49 | count = local.create_eks_service_role ? 1 : 0
50 |
51 | statement {
52 | sid = "AllowElasticLoadBalancer"
53 | effect = "Allow"
54 | #bridgecrew:skip=BC_AWS_IAM_57:There is no workable constraint to add to this policy
55 | actions = [
56 | "ec2:DescribeAccountAttributes",
57 | "ec2:DescribeAddresses",
58 | "ec2:DescribeInternetGateways",
59 | "elasticloadbalancing:SetIpAddressType",
60 | "elasticloadbalancing:SetSubnets"
61 | ]
62 | resources = ["*"]
63 | }
64 | # Adding a policy to cluster IAM role that deny permissions to logs:CreateLogGroup
65 | # it is not needed since we create the log group elsewhere in this module, and it is causing trouble during "destroy"
66 | statement {
67 | sid = "DenyCreateLogGroup"
68 | effect = "Deny"
69 | actions = [
70 | "logs:CreateLogGroup"
71 | ]
72 | resources = ["*"]
73 | }
74 | }
75 |
76 | resource "aws_iam_policy" "cluster_elb_service_role" {
77 | count = local.create_eks_service_role ? 1 : 0
78 |
79 | name = "${module.label.id}-ServiceRole"
80 | policy = one(data.aws_iam_policy_document.cluster_elb_service_role[*].json)
81 |
82 | tags = module.this.tags
83 | }
84 |
85 | resource "aws_iam_role_policy_attachment" "cluster_elb_service_role" {
86 | count = local.create_eks_service_role ? 1 : 0
87 |
88 | policy_arn = one(aws_iam_policy.cluster_elb_service_role[*].arn)
89 | role = one(aws_iam_role.default[*].name)
90 | }
91 |
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | enabled = module.this.enabled
3 |
4 | use_ipv6 = var.kubernetes_network_ipv6_enabled
5 |
6 | eks_cluster_id = one(aws_eks_cluster.default[*].id)
7 |
8 | cluster_encryption_config = {
9 | resources = var.cluster_encryption_config_resources
10 |
11 | provider_key_arn = local.enabled && var.cluster_encryption_config_enabled && var.cluster_encryption_config_kms_key_id == "" ? (
12 | one(aws_kms_key.cluster[*].arn)
13 | ) : var.cluster_encryption_config_kms_key_id
14 | }
15 |
16 | cloudwatch_log_group_name = "/aws/eks/${module.label.id}/cluster"
17 | }
18 |
19 | module "label" {
20 | source = "cloudposse/label/null"
21 | version = "0.25.0"
22 |
23 | attributes = var.cluster_attributes
24 |
25 | context = module.this.context
26 | }
27 |
28 | data "aws_partition" "current" {
29 | count = local.enabled ? 1 : 0
30 | }
31 |
32 | resource "aws_cloudwatch_log_group" "default" {
33 | count = local.enabled && length(var.enabled_cluster_log_types) > 0 ? 1 : 0
34 | name = local.cloudwatch_log_group_name
35 | retention_in_days = var.cluster_log_retention_period
36 | kms_key_id = var.cloudwatch_log_group_kms_key_id
37 | tags = module.label.tags
38 | log_group_class = var.cloudwatch_log_group_class
39 | }
40 |
41 | resource "aws_kms_key" "cluster" {
42 | count = local.enabled && var.cluster_encryption_config_enabled && var.cluster_encryption_config_kms_key_id == "" ? 1 : 0
43 | description = "EKS Cluster ${module.label.id} Encryption Config KMS Key"
44 | enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation
45 | deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days
46 | policy = var.cluster_encryption_config_kms_key_policy
47 | tags = module.label.tags
48 | }
49 |
50 | resource "aws_kms_alias" "cluster" {
51 | count = local.enabled && var.cluster_encryption_config_enabled && var.cluster_encryption_config_kms_key_id == "" ? 1 : 0
52 | name = format("alias/%v", module.label.id)
53 | target_key_id = one(aws_kms_key.cluster[*].key_id)
54 | }
55 |
56 | resource "aws_eks_cluster" "default" {
57 | #bridgecrew:skip=BC_AWS_KUBERNETES_1:Allow permissive security group for public access, difficult to restrict without a VPN
58 | #bridgecrew:skip=BC_AWS_KUBERNETES_4:Let user decide on control plane logging, not necessary in non-production environments
59 | count = local.enabled ? 1 : 0
60 | name = module.label.id
61 | tags = module.label.tags
62 | role_arn = local.eks_service_role_arn
63 | version = var.kubernetes_version
64 | enabled_cluster_log_types = var.enabled_cluster_log_types
65 | bootstrap_self_managed_addons = var.bootstrap_self_managed_addons_enabled
66 |
67 | access_config {
68 | authentication_mode = var.access_config.authentication_mode
69 | bootstrap_cluster_creator_admin_permissions = var.access_config.bootstrap_cluster_creator_admin_permissions
70 | }
71 |
72 | lifecycle {
73 | # bootstrap_cluster_creator_admin_permissions is documented as only applying
74 | # to the initial creation of the cluster, and being unreliable afterward,
75 | # so we want to ignore it except at cluster creation time.
76 | ignore_changes = [access_config[0].bootstrap_cluster_creator_admin_permissions]
77 | }
78 |
79 | dynamic "encryption_config" {
80 | #bridgecrew:skip=BC_AWS_KUBERNETES_3:Let user decide secrets encryption, mainly because changing this value requires completely destroying the cluster
81 | for_each = var.cluster_encryption_config_enabled ? [local.cluster_encryption_config] : []
82 | content {
83 | resources = encryption_config.value.resources
84 | provider {
85 | key_arn = encryption_config.value.provider_key_arn
86 | }
87 | }
88 | }
89 |
90 | vpc_config {
91 | security_group_ids = var.associated_security_group_ids
92 | subnet_ids = var.subnet_ids
93 | endpoint_private_access = var.endpoint_private_access
94 | #bridgecrew:skip=BC_AWS_KUBERNETES_2:Let user decide on public access
95 | endpoint_public_access = var.endpoint_public_access
96 | public_access_cidrs = var.public_access_cidrs
97 | }
98 |
99 | dynamic "kubernetes_network_config" {
100 | for_each = local.use_ipv6 ? [] : compact([var.service_ipv4_cidr])
101 | content {
102 | service_ipv4_cidr = kubernetes_network_config.value
103 | }
104 | }
105 |
106 | dynamic "kubernetes_network_config" {
107 | for_each = local.use_ipv6 ? [true] : []
108 | content {
109 | ip_family = "ipv6"
110 | }
111 | }
112 |
113 | dynamic "upgrade_policy" {
114 | for_each = var.upgrade_policy != null ? [var.upgrade_policy] : []
115 | content {
116 | support_type = upgrade_policy.value.support_type
117 | }
118 | }
119 |
120 | dynamic "zonal_shift_config" {
121 | for_each = var.zonal_shift_config != null ? [var.zonal_shift_config] : []
122 | content {
123 | enabled = zonal_shift_config.value.enabled
124 | }
125 | }
126 |
127 | depends_on = [
128 | aws_iam_role.default,
129 | aws_iam_role_policy_attachment.cluster_elb_service_role,
130 | aws_iam_role_policy_attachment.amazon_eks_cluster_policy,
131 | aws_iam_role_policy_attachment.amazon_eks_service_policy,
132 | aws_kms_alias.cluster,
133 | aws_cloudwatch_log_group.default,
134 | var.associated_security_group_ids,
135 | var.cluster_depends_on,
136 | var.subnet_ids,
137 | ]
138 | }
139 |
140 | # Enabling IAM Roles for Service Accounts in Kubernetes cluster
141 | #
142 | # From official docs:
143 | # The IAM roles for service accounts feature is available on new Amazon EKS Kubernetes version 1.14 clusters,
144 | # and clusters that were updated to versions 1.14 or 1.13 on or after September 3rd, 2019.
145 | #
146 | # https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html
147 | # https://medium.com/@marcincuber/amazon-eks-with-oidc-provider-iam-roles-for-kubernetes-services-accounts-59015d15cb0c
148 | #
149 |
150 | data "tls_certificate" "cluster" {
151 | count = local.enabled && var.oidc_provider_enabled ? 1 : 0
152 | url = one(aws_eks_cluster.default[*].identity[0].oidc[0].issuer)
153 | }
154 |
155 | resource "aws_iam_openid_connect_provider" "default" {
156 | count = local.enabled && var.oidc_provider_enabled ? 1 : 0
157 | url = one(aws_eks_cluster.default[*].identity[0].oidc[0].issuer)
158 | tags = module.label.tags
159 |
160 | client_id_list = ["sts.amazonaws.com"]
161 | thumbprint_list = [one(data.tls_certificate.cluster[*].certificates[0].sha1_fingerprint)]
162 | }
163 |
164 | resource "aws_eks_addon" "cluster" {
165 | for_each = local.enabled ? {
166 | for addon in var.addons :
167 | addon.addon_name => addon
168 | } : {}
169 |
170 | cluster_name = one(aws_eks_cluster.default[*].name)
171 | addon_name = each.key
172 | addon_version = lookup(each.value, "addon_version", null)
173 | configuration_values = lookup(each.value, "configuration_values", null)
174 | resolve_conflicts_on_create = lookup(each.value, "resolve_conflicts_on_create", try(replace(each.value.resolve_conflicts, "PRESERVE", "NONE"), null))
175 | resolve_conflicts_on_update = lookup(each.value, "resolve_conflicts_on_update", lookup(each.value, "resolve_conflicts", null))
176 | service_account_role_arn = lookup(each.value, "service_account_role_arn", null)
177 |
178 | tags = merge(module.label.tags, each.value.additional_tags)
179 |
180 | depends_on = [
181 | var.addons_depends_on,
182 | aws_eks_cluster.default,
183 | # OIDC provider is prerequisite for some addons. See, for example,
184 | # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html
185 | aws_iam_openid_connect_provider.default,
186 | ]
187 |
188 | timeouts {
189 | create = each.value.create_timeout
190 | update = each.value.update_timeout
191 | delete = each.value.delete_timeout
192 | }
193 | }
194 |
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
1 | output "eks_cluster_id" {
2 | description = "The name of the cluster"
3 | value = one(aws_eks_cluster.default[*].id)
4 | }
5 |
6 | output "eks_cluster_arn" {
7 | description = "The Amazon Resource Name (ARN) of the cluster"
8 | value = one(aws_eks_cluster.default[*].arn)
9 | }
10 |
11 | output "eks_cluster_endpoint" {
12 | description = "The endpoint for the Kubernetes API server"
13 | value = one(aws_eks_cluster.default[*].endpoint)
14 | }
15 |
16 | output "eks_cluster_version" {
17 | description = "The Kubernetes server version of the cluster"
18 | value = one(aws_eks_cluster.default[*].version)
19 | }
20 |
21 | output "eks_cluster_identity_oidc_issuer" {
22 | description = "The OIDC Identity issuer for the cluster"
23 | value = one(aws_eks_cluster.default[*].identity[0].oidc[0].issuer)
24 | }
25 |
26 | output "eks_cluster_identity_oidc_issuer_arn" {
27 | description = "The OIDC Identity issuer ARN for the cluster that can be used to associate IAM roles with a service account"
28 | value = one(aws_iam_openid_connect_provider.default[*].arn)
29 | }
30 |
31 | output "eks_cluster_certificate_authority_data" {
32 | description = "The Kubernetes cluster certificate authority data"
33 | value = local.certificate_authority_data
34 | }
35 |
36 | output "eks_cluster_managed_security_group_id" {
37 |   description = <<-EOT
38 |     Security Group ID that was created by EKS for the cluster.
39 |     EKS creates a Security Group and applies it to the ENIs that are attached to EKS Control Plane master nodes and to any managed workloads.
40 |     EOT
41 |   value       = one(aws_eks_cluster.default[*].vpc_config[0].cluster_security_group_id)
42 | }
43 |
44 | output "eks_cluster_role_arn" {
45 | description = "ARN of the EKS cluster IAM role"
46 | value = local.eks_service_role_arn
47 | }
48 |
49 | output "eks_cluster_ipv4_service_cidr" {
50 | description = <<-EOT
51 | The IPv4 CIDR block that Kubernetes pod and service IP addresses are assigned from
52 | if `kubernetes_network_ipv6_enabled` is set to false. If set to true this output will be null.
53 | EOT
54 | value = one(aws_eks_cluster.default[*].kubernetes_network_config[0].service_ipv4_cidr)
55 | }
56 |
57 | output "eks_cluster_ipv6_service_cidr" {
58 | description = <<-EOT
59 | The IPv6 CIDR block that Kubernetes pod and service IP addresses are assigned from
60 | if `kubernetes_network_ipv6_enabled` is set to true. If set to false this output will be null.
61 | EOT
62 | value = one(aws_eks_cluster.default[*].kubernetes_network_config[0].service_ipv6_cidr)
63 | }
64 |
65 | output "eks_addons_versions" {
66 | description = "Map of enabled EKS Addons names and versions"
67 | value = local.enabled ? {
68 | for addon in aws_eks_addon.cluster :
69 | addon.addon_name => addon.addon_version
70 | } : {}
71 | }
72 |
73 | output "cluster_encryption_config_enabled" {
74 | description = "If true, Cluster Encryption Configuration is enabled"
75 | value = var.cluster_encryption_config_enabled
76 | }
77 |
78 | output "cluster_encryption_config_resources" {
79 | description = "Cluster Encryption Config Resources"
80 | value = var.cluster_encryption_config_resources
81 | }
82 |
83 | output "cluster_encryption_config_provider_key_arn" {
84 | description = "Cluster Encryption Config KMS Key ARN"
85 | value = local.cluster_encryption_config.provider_key_arn
86 | }
87 |
88 | output "cluster_encryption_config_provider_key_alias" {
89 | description = "Cluster Encryption Config KMS Key Alias ARN"
90 | value = one(aws_kms_alias.cluster[*].arn)
91 | }
92 |
93 | output "cloudwatch_log_group_name" {
94 | description = "The name of the log group created in cloudwatch where cluster logs are forwarded to if enabled"
95 | value = local.cloudwatch_log_group_name
96 | }
97 |
98 | output "cloudwatch_log_group_kms_key_id" {
99 | description = "KMS Key ID to encrypt AWS CloudWatch logs"
100 | value = var.cloudwatch_log_group_kms_key_id
101 | }
102 |
--------------------------------------------------------------------------------
/security-group.tf:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------
2 | # Rules for EKS-managed Security Group
3 | # -----------------------------------------------------------------------
4 |
5 | locals {
6 | cluster_security_group_id = one(aws_eks_cluster.default[*].vpc_config[0].cluster_security_group_id)
7 | managed_security_group_rules_enabled = local.enabled && var.managed_security_group_rules_enabled
8 | }
9 |
10 | resource "aws_vpc_security_group_ingress_rule" "managed_ingress_security_groups" {
11 | count = local.managed_security_group_rules_enabled ? length(var.allowed_security_group_ids) : 0
12 |
13 | description = "Allow inbound traffic from existing Security Groups"
14 | ip_protocol = "-1"
15 | referenced_security_group_id = var.allowed_security_group_ids[count.index]
16 | security_group_id = local.cluster_security_group_id
17 | }
18 |
19 | resource "aws_vpc_security_group_ingress_rule" "managed_ingress_cidr_blocks" {
20 | count = local.managed_security_group_rules_enabled ? length(var.allowed_cidr_blocks) : 0
21 |
22 | description = "Allow inbound traffic from CIDR blocks"
23 | ip_protocol = "-1"
24 | cidr_ipv4 = var.allowed_cidr_blocks[count.index]
25 | security_group_id = local.cluster_security_group_id
26 | }
27 |
28 | resource "aws_vpc_security_group_ingress_rule" "custom_ingress_rules" {
29 | for_each = { for sg_rule in var.custom_ingress_rules : sg_rule.source_security_group_id => sg_rule }
30 |
31 | description = each.value.description
32 | from_port = each.value.from_port
33 | to_port = each.value.to_port
34 | ip_protocol = each.value.protocol
35 | referenced_security_group_id = each.value.source_security_group_id
36 | security_group_id = local.cluster_security_group_id
37 | }
38 |
--------------------------------------------------------------------------------
/test/.gitignore:
--------------------------------------------------------------------------------
1 | .test-harness
2 |
--------------------------------------------------------------------------------
/test/Makefile:
--------------------------------------------------------------------------------
1 | TEST_HARNESS ?= https://github.com/cloudposse/test-harness.git
2 | TEST_HARNESS_BRANCH ?= master
3 | TEST_HARNESS_PATH = $(realpath .test-harness)
4 | BATS_ARGS ?= --tap
5 | BATS_LOG ?= test.log
6 |
7 | # Define a macro to run the tests
8 | define RUN_TESTS
9 | @echo "Running tests in $(1)"
10 | @cd $(1) && bats $(BATS_ARGS) $(addsuffix .bats,$(addprefix $(TEST_HARNESS_PATH)/test/terraform/,$(TESTS)))
11 | endef
12 |
13 | default: all
14 |
15 | -include Makefile.*
16 |
17 | ## Provision the test-harness
18 | .test-harness:
19 | [ -d $@ ] || git clone --depth=1 -b $(TEST_HARNESS_BRANCH) $(TEST_HARNESS) $@
20 |
21 | ## Initialize the tests
22 | init: .test-harness
23 |
24 | ## Install all dependencies (OS specific)
25 | deps::
26 | @exit 0
27 |
28 | ## Clean up the test harness
29 | clean:
30 | 	[ "$(TEST_HARNESS_PATH)" = "/" ] || rm -rf $(TEST_HARNESS_PATH)
31 |
32 | ## Run all tests
33 | all: module examples/complete
34 |
35 | ## Run basic sanity checks against the module itself
36 | # disable provider-pinning test due to https://github.com/hashicorp/terraform-provider-tls/issues/244
37 | #module: export TESTS ?= installed lint module-pinning provider-pinning validate terraform-docs input-descriptions output-descriptions
38 | module: export TESTS ?= installed lint module-pinning validate terraform-docs input-descriptions output-descriptions
39 | module: deps
40 | $(call RUN_TESTS, ../)
41 |
42 | ## Run tests against example
43 | examples/complete: export TESTS ?= installed lint validate
44 | examples/complete: deps
45 | $(call RUN_TESTS, ../$@)
46 |
--------------------------------------------------------------------------------
/test/Makefile.alpine:
--------------------------------------------------------------------------------
1 | ifneq (,$(wildcard /sbin/apk))
2 | ## Install all dependencies for alpine
3 | deps:: init
4 | @apk add --update terraform-docs@cloudposse json2hcl@cloudposse
5 | endif
6 |
--------------------------------------------------------------------------------
/test/src/.gitignore:
--------------------------------------------------------------------------------
1 | .gopath
2 | vendor/
3 |
--------------------------------------------------------------------------------
/test/src/Makefile:
--------------------------------------------------------------------------------
1 | export TERRAFORM_VERSION ?= $(shell curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version' | cut -d. -f1)
2 |
3 | .DEFAULT_GOAL : all
4 | .PHONY: all
5 |
6 | ## Default target
7 | all: test
8 |
9 | .PHONY : init
10 | ## Initialize tests
11 | init:
12 | @exit 0
13 |
14 | .PHONY : test
15 | ## Run tests
16 | test: init
17 | go mod download
18 | go test -v -timeout 60m
19 |
20 | ## Run tests in docker container
21 | docker/test:
22 | docker run --name terratest --rm -it -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN -e GITHUB_TOKEN \
23 | -e PATH="/usr/local/terraform/$(TERRAFORM_VERSION)/bin:/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
24 | -v $(CURDIR)/../../:/module/ cloudposse/test-harness:latest -C /module/test/src test
25 |
26 | .PHONY : clean
27 | ## Clean up files
28 | clean:
29 | rm -rf ../../examples/complete/*.tfstate*
30 |
--------------------------------------------------------------------------------
/test/src/examples_complete_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "encoding/base64"
5 | "fmt"
6 | testStructure "github.com/gruntwork-io/terratest/modules/test-structure"
7 | "k8s.io/apimachinery/pkg/util/runtime"
8 | "regexp"
9 | "strings"
10 | "sync/atomic"
11 | "testing"
12 | "time"
13 |
14 | "github.com/gruntwork-io/terratest/modules/random"
15 | "github.com/gruntwork-io/terratest/modules/terraform"
16 | "github.com/stretchr/testify/assert"
17 |
18 | corev1 "k8s.io/api/core/v1"
19 | "k8s.io/client-go/informers"
20 | "k8s.io/client-go/kubernetes"
21 | "k8s.io/client-go/rest"
22 | "k8s.io/client-go/tools/cache"
23 | "sigs.k8s.io/aws-iam-authenticator/pkg/token"
24 |
25 | "github.com/aws/aws-sdk-go/aws"
26 | "github.com/aws/aws-sdk-go/aws/session"
27 | "github.com/aws/aws-sdk-go/service/eks"
28 | )
29 |
30 | func newClientset(cluster *eks.Cluster) (*kubernetes.Clientset, error) {
31 | gen, err := token.NewGenerator(true, false)
32 | if err != nil {
33 | return nil, err
34 | }
35 | opts := &token.GetTokenOptions{
36 | ClusterID: aws.StringValue(cluster.Name),
37 | }
38 | tok, err := gen.GetWithOptions(opts)
39 | if err != nil {
40 | return nil, err
41 | }
42 | ca, err := base64.StdEncoding.DecodeString(aws.StringValue(cluster.CertificateAuthority.Data))
43 | if err != nil {
44 | return nil, err
45 | }
46 | clientset, err := kubernetes.NewForConfig(
47 | &rest.Config{
48 | Host: aws.StringValue(cluster.Endpoint),
49 | BearerToken: tok.Token,
50 | TLSClientConfig: rest.TLSClientConfig{
51 | CAData: ca,
52 | },
53 | },
54 | )
55 | if err != nil {
56 | return nil, err
57 | }
58 | return clientset, nil
59 | }
60 |
61 | // Test the Terraform module in examples/complete using Terratest.
62 | func TestExamplesComplete(t *testing.T) {
63 |
64 | randId := strings.ToLower(random.UniqueId())
65 | attributes := []string{randId}
66 |
67 | terraformOptions := &terraform.Options{
68 | // The path to where our Terraform code is located
69 | TerraformDir: "../../examples/complete",
70 | Upgrade: true,
71 | // Variables to pass to our Terraform code using -var-file options
72 | VarFiles: []string{"fixtures.us-east-2.tfvars"},
73 | Vars: map[string]interface{}{
74 | "attributes": attributes,
75 | },
76 | }
77 |
78 | // At the end of the test, run `terraform destroy` to clean up any resources that were created
79 | defer terraform.Destroy(t, terraformOptions)
80 |
81 | 	// If Go runtime crashes, run `terraform destroy` to clean up any resources that were created
82 | defer runtime.HandleCrash(func(i interface{}) {
83 | terraform.Destroy(t, terraformOptions)
84 | })
85 |
86 | // This will run `terraform init` and `terraform apply` and fail the test if there are any errors
87 | terraform.InitAndApply(t, terraformOptions)
88 |
89 | // Run `terraform output` to get the value of an output variable
90 | vpcCidr := terraform.Output(t, terraformOptions, "vpc_cidr")
91 | // Verify we're getting back the outputs we expect
92 | assert.Equal(t, "172.16.0.0/16", vpcCidr)
93 |
94 | // Run `terraform output` to get the value of an output variable
95 | privateSubnetCidrs := terraform.OutputList(t, terraformOptions, "private_subnet_cidrs")
96 | // Verify we're getting back the outputs we expect
97 | assert.Equal(t, []string{"172.16.0.0/19", "172.16.32.0/19"}, privateSubnetCidrs)
98 |
99 | // Run `terraform output` to get the value of an output variable
100 | publicSubnetCidrs := terraform.OutputList(t, terraformOptions, "public_subnet_cidrs")
101 | // Verify we're getting back the outputs we expect
102 | assert.Equal(t, []string{"172.16.96.0/19", "172.16.128.0/19"}, publicSubnetCidrs)
103 |
104 | // Run `terraform output` to get the value of an output variable
105 | eksClusterId := terraform.Output(t, terraformOptions, "eks_cluster_id")
106 | // Verify we're getting back the outputs we expect
107 | assert.Equal(t, "eg-test-eks-"+randId+"-cluster", eksClusterId)
108 |
109 | // Run `terraform output` to get the value of an output variable
110 | eksNodeGroupId := terraform.Output(t, terraformOptions, "eks_node_group_id")
111 | // Verify we're getting back the outputs we expect
112 | // The node group ID will have a random pet name appended to it
113 | assert.Contains(t, eksNodeGroupId, "eg-test-eks-"+randId+"-cluster:eg-test-eks-"+randId+"-workers-")
114 |
115 | // Run `terraform output` to get the value of an output variable
116 | eksNodeGroupRoleName := terraform.Output(t, terraformOptions, "eks_node_group_role_name")
117 | // Verify we're getting back the outputs we expect
118 | assert.Equal(t, "eg-test-eks-"+randId+"-workers", eksNodeGroupRoleName)
119 |
120 | // Run `terraform output` to get the value of an output variable
121 | eksNodeGroupStatus := terraform.Output(t, terraformOptions, "eks_node_group_status")
122 | // Verify we're getting back the outputs we expect
123 | assert.Equal(t, "ACTIVE", eksNodeGroupStatus)
124 |
125 | // Wait for the worker nodes to join the cluster
126 | // https://github.com/kubernetes/client-go
127 | // https://www.rushtehrani.com/post/using-kubernetes-api
128 | // https://rancher.com/using-kubernetes-api-go-kubecon-2017-session-recap
129 | // https://gianarb.it/blog/kubernetes-shared-informer
130 | // https://stackoverflow.com/questions/60547409/unable-to-obtain-kubeconfig-of-an-aws-eks-cluster-in-go-code/60573982#60573982
131 | fmt.Println("Waiting for worker nodes to join the EKS cluster")
132 |
133 | clusterName := "eg-test-eks-" + randId + "-cluster"
134 | region := "us-east-2"
135 |
136 | sess := session.Must(session.NewSession(&aws.Config{
137 | Region: aws.String(region),
138 | }))
139 |
140 | eksSvc := eks.New(sess)
141 |
142 | input := &eks.DescribeClusterInput{
143 | Name: aws.String(clusterName),
144 | }
145 |
146 | result, err := eksSvc.DescribeCluster(input)
147 | assert.NoError(t, err)
148 |
149 | clientset, err := newClientset(result.Cluster)
150 | assert.NoError(t, err)
151 |
152 | factory := informers.NewSharedInformerFactory(clientset, 0)
153 | informer := factory.Core().V1().Nodes().Informer()
154 | stopChannel := make(chan struct{})
155 | var countOfWorkerNodes uint64 = 0
156 |
157 | informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
158 | AddFunc: func(obj interface{}) {
159 | node := obj.(*corev1.Node)
160 | fmt.Printf("Worker Node %s has joined the EKS cluster at %s\n", node.Name, node.CreationTimestamp)
161 | atomic.AddUint64(&countOfWorkerNodes, 1)
162 | if countOfWorkerNodes > 1 {
163 | close(stopChannel)
164 | }
165 | },
166 | })
167 |
168 | go informer.Run(stopChannel)
169 |
170 | select {
171 | case <-stopChannel:
172 | msg := "All worker nodes have joined the EKS cluster"
173 | fmt.Println(msg)
174 | case <-time.After(5 * time.Minute):
175 | msg := "Not all worker nodes have joined the EKS cluster"
176 | fmt.Println(msg)
177 | assert.Fail(t, msg)
178 | }
179 | }
180 |
181 | func TestExamplesCompleteDisabled(t *testing.T) {
182 | t.Parallel()
183 | randID := strings.ToLower(random.UniqueId())
184 | attributes := []string{randID}
185 |
186 | rootFolder := "../../"
187 | terraformFolderRelativeToRoot := "examples/complete"
188 | varFiles := []string{"fixtures.us-east-2.tfvars"}
189 |
190 | tempTestFolder := testStructure.CopyTerraformFolderToTemp(t, rootFolder, terraformFolderRelativeToRoot)
191 |
192 | terraformOptions := &terraform.Options{
193 | // The path to where our Terraform code is located
194 | TerraformDir: tempTestFolder,
195 | Upgrade: true,
196 | // Variables to pass to our Terraform code using -var-file options
197 | VarFiles: varFiles,
198 | Vars: map[string]interface{}{
199 | "attributes": attributes,
200 | "enabled": false,
201 | },
202 | }
203 |
204 | // At the end of the test, run `terraform destroy` to clean up any resources that were created
205 | defer terraform.Destroy(t, terraformOptions)
206 |
207 | // This will run `terraform init` and `terraform apply` and fail the test if there are any errors
208 | results := terraform.InitAndApply(t, terraformOptions)
209 |
210 | // Should complete successfully without creating or changing any resources.
211 | // Extract the "Resources:" section of the output to make the error message more readable.
212 | re := regexp.MustCompile(`Resources: [^.]+\.`)
213 | match := re.FindString(results)
214 | assert.Equal(t, "Resources: 0 added, 0 changed, 0 destroyed.", match, "Applying with enabled=false should not create any resources")
215 | }
216 |
--------------------------------------------------------------------------------
/test/src/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/cloudposse/terraform-aws-eks-cluster
2 |
3 | go 1.21
4 |
5 | require (
6 | github.com/aws/aws-sdk-go v1.50.30
7 | github.com/gruntwork-io/terratest v0.46.15
8 | github.com/stretchr/testify v1.9.0
9 | k8s.io/api v0.29.2
10 | k8s.io/apimachinery v0.29.2
11 | k8s.io/client-go v0.29.2
12 | sigs.k8s.io/aws-iam-authenticator v0.6.18
13 | )
14 |
15 | require (
16 | cloud.google.com/go v0.112.1 // indirect
17 | cloud.google.com/go/compute v1.24.0 // indirect
18 | cloud.google.com/go/compute/metadata v0.2.3 // indirect
19 | cloud.google.com/go/iam v1.1.6 // indirect
20 | cloud.google.com/go/storage v1.39.0 // indirect
21 | github.com/agext/levenshtein v1.2.3 // indirect
22 | github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
23 | github.com/beorn7/perks v1.0.1 // indirect
24 | github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
25 | github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
26 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
27 | github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
28 | github.com/davecgh/go-spew v1.1.1 // indirect
29 | github.com/emicklei/go-restful/v3 v3.11.3 // indirect
30 | github.com/felixge/httpsnoop v1.0.4 // indirect
31 | github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect
32 | github.com/go-logr/logr v1.4.1 // indirect
33 | github.com/go-logr/stdr v1.2.2 // indirect
34 | github.com/go-openapi/jsonpointer v0.20.2 // indirect
35 | github.com/go-openapi/jsonreference v0.20.4 // indirect
36 | github.com/go-openapi/swag v0.22.9 // indirect
37 | github.com/go-sql-driver/mysql v1.4.1 // indirect
38 | github.com/gofrs/flock v0.8.1 // indirect
39 | github.com/gogo/protobuf v1.3.2 // indirect
40 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
41 | github.com/golang/protobuf v1.5.3 // indirect
42 | github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
43 | github.com/google/go-cmp v0.6.0 // indirect
44 | github.com/google/gofuzz v1.2.0 // indirect
45 | github.com/google/s2a-go v0.1.7 // indirect
46 | github.com/google/uuid v1.6.0 // indirect
47 | github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
48 | github.com/googleapis/gax-go/v2 v2.12.2 // indirect
49 | github.com/gruntwork-io/go-commons v0.8.0 // indirect
50 | github.com/hashicorp/errwrap v1.1.0 // indirect
51 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
52 | github.com/hashicorp/go-getter v1.7.5 // indirect
53 | github.com/hashicorp/go-multierror v1.1.1 // indirect
54 | github.com/hashicorp/go-safetemp v1.0.0 // indirect
55 | github.com/hashicorp/go-version v1.6.0 // indirect
56 | github.com/hashicorp/hcl/v2 v2.20.0 // indirect
57 | github.com/hashicorp/terraform-json v0.21.0 // indirect
58 | github.com/imdario/mergo v0.3.11 // indirect
59 | github.com/jinzhu/copier v0.4.0 // indirect
60 | github.com/jmespath/go-jmespath v0.4.0 // indirect
61 | github.com/josharian/intern v1.0.0 // indirect
62 | github.com/json-iterator/go v1.1.12 // indirect
63 | github.com/klauspost/compress v1.17.7 // indirect
64 | github.com/mailru/easyjson v0.7.7 // indirect
65 | github.com/mattn/go-zglob v0.0.4 // indirect
66 | github.com/mitchellh/go-homedir v1.1.0 // indirect
67 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect
68 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect
69 | github.com/moby/spdystream v0.2.0 // indirect
70 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
71 | github.com/modern-go/reflect2 v1.0.2 // indirect
72 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
73 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
74 | github.com/pmezard/go-difflib v1.0.0 // indirect
75 | github.com/pquerna/otp v1.2.0 // indirect
76 | github.com/prometheus/client_golang v1.19.0 // indirect
77 | github.com/prometheus/client_model v0.6.0 // indirect
78 | github.com/prometheus/common v0.49.0 // indirect
79 | github.com/prometheus/procfs v0.12.0 // indirect
80 | github.com/russross/blackfriday/v2 v2.1.0 // indirect
81 | github.com/sirupsen/logrus v1.9.3 // indirect
82 | github.com/spf13/pflag v1.0.5 // indirect
83 | github.com/tmccombs/hcl2json v0.6.1 // indirect
84 | github.com/ulikunitz/xz v0.5.11 // indirect
85 | github.com/urfave/cli v1.22.2 // indirect
86 | github.com/zclconf/go-cty v1.14.3 // indirect
87 | go.opencensus.io v0.24.0 // indirect
88 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
89 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
90 | go.opentelemetry.io/otel v1.24.0 // indirect
91 | go.opentelemetry.io/otel/metric v1.24.0 // indirect
92 | go.opentelemetry.io/otel/trace v1.24.0 // indirect
93 | golang.org/x/crypto v0.21.0 // indirect
94 | golang.org/x/mod v0.15.0 // indirect
95 | golang.org/x/net v0.23.0 // indirect
96 | golang.org/x/oauth2 v0.17.0 // indirect
97 | golang.org/x/sync v0.6.0 // indirect
98 | golang.org/x/sys v0.18.0 // indirect
99 | golang.org/x/term v0.18.0 // indirect
100 | golang.org/x/text v0.14.0 // indirect
101 | golang.org/x/time v0.5.0 // indirect
102 | golang.org/x/tools v0.18.0 // indirect
103 | google.golang.org/api v0.167.0 // indirect
104 | google.golang.org/appengine v1.6.8 // indirect
105 | google.golang.org/genproto v0.0.0-20240228224816-df926f6c8641 // indirect
106 | google.golang.org/genproto/googleapis/api v0.0.0-20240228224816-df926f6c8641 // indirect
107 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240228224816-df926f6c8641 // indirect
108 | google.golang.org/grpc v1.62.0 // indirect
109 | google.golang.org/protobuf v1.33.0 // indirect
110 | gopkg.in/inf.v0 v0.9.1 // indirect
111 | gopkg.in/yaml.v2 v2.4.0 // indirect
112 | gopkg.in/yaml.v3 v3.0.1 // indirect
113 | k8s.io/klog/v2 v2.120.1 // indirect
114 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
115 | k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
116 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
117 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
118 | sigs.k8s.io/yaml v1.4.0 // indirect
119 | )
120 |
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
# Retained only for backwards compatibility with configurations that still
# pass `region`; the tflint directive below confirms it is declared but unused.
# tflint-ignore: terraform_unused_declarations
variable "region" {
  type        = string
  description = "OBSOLETE (not needed): AWS Region"
  default     = null
}
8 |
# Required (no default): subnets in which to launch the cluster.
variable "subnet_ids" {
  type        = list(string)
  description = "A list of subnet IDs to launch the cluster in"
}

# Pre-existing security groups attached as-is; per the description, the
# module does not add or modify any of their rules.
variable "associated_security_group_ids" {
  type        = list(string)
  default     = []
  description = <<-EOT
    A list of IDs of Security Groups to associate the cluster with.
    These security groups will not be modified.
    EOT
}

# Typed `any` so callers can pass any resource or module output purely to
# establish ordering — NOTE(review): presumed the value itself is never read;
# confirm in main.tf.
variable "cluster_depends_on" {
  type        = any
  description = <<-EOT
    If provided, the EKS will depend on this object, and therefore not be created until this object is finalized.
    This is useful if you want to ensure that the cluster is not created before some other condition is met, e.g. VPNs into the subnet are created.
    EOT
  default     = null
}

variable "create_eks_service_role" {
  type        = bool
  description = "Set `false` to use existing `eks_cluster_service_role_arn` instead of creating one"
  default     = true
}

# Only consulted when `create_eks_service_role` is `false` (see description).
variable "eks_cluster_service_role_arn" {
  type        = string
  description = <<-EOT
    The ARN of an IAM role for the EKS cluster to use that provides permissions
    for the Kubernetes control plane to perform needed AWS API operations.
    Required if `create_eks_service_role` is `false`, ignored otherwise.
    EOT
  default     = null
}
47 |
48 |
# NOTE: because this variable has a non-null default, a version is always
# supplied to the cluster. The previous description ("If you do not specify a
# value, the latest available version is used") was unreachable behavior.
variable "kubernetes_version" {
  type        = string
  description = "Desired Kubernetes master version. Defaults to `1.21`. Set to `null` to have the latest available version used"
  default     = "1.21"
}
54 |
# Enables IAM Roles for Service Accounts (IRSA) via an OIDC identity provider.
variable "oidc_provider_enabled" {
  type        = bool
  description = <<-EOT
    Create an IAM OIDC identity provider for the cluster, then you can create IAM roles to associate with a
    service account in the cluster, instead of using kiam or kube2iam. For more information,
    see [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html).
    EOT
  default     = false
}

variable "endpoint_private_access" {
  type        = bool
  description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled. Default to AWS EKS resource and it is false"
  default     = false
}

variable "endpoint_public_access" {
  type        = bool
  description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled. Default to AWS EKS resource and it is true"
  default     = true
}

# Per the description, only relevant when the public endpoint is enabled.
variable "public_access_cidrs" {
  type        = list(string)
  description = "Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with 0.0.0.0/0."
  default     = ["0.0.0.0/0"]
}

# Immutable after cluster creation (see description): changing it forces
# a new cluster.
variable "service_ipv4_cidr" {
  type        = string
  description = <<-EOT
    The CIDR block to assign Kubernetes service IP addresses from.
    You can only specify a custom CIDR block when you create a cluster, changing this value will force a new cluster to be created.
    EOT
  default     = null
}

variable "kubernetes_network_ipv6_enabled" {
  type        = bool
  description = "Set true to use IPv6 addresses for Kubernetes pods and services"
  default     = false
}

# Empty list (the default) disables control plane logging.
variable "enabled_cluster_log_types" {
  type        = list(string)
  description = "A list of the desired control plane logging to enable. For more information, see https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html. Possible values [`api`, `audit`, `authenticator`, `controllerManager`, `scheduler`]"
  default     = []
}
103 |
# NOTE(review): 0 appears to follow the CloudWatch convention of
# "never expire" — confirm against the log group resource in main.tf.
variable "cluster_log_retention_period" {
  type        = number
  description = "Number of days to retain cluster logs. Requires `enabled_cluster_log_types` to be set. See https://docs.aws.amazon.com/en_us/eks/latest/userguide/control-plane-logs.html."
  default     = 0
}

variable "cluster_encryption_config_enabled" {
  type        = bool
  description = "Set to `true` to enable Cluster Encryption Configuration"
  default     = true
}

# NOTE(review): an empty string presumably causes the module to create its
# own KMS key (the `cluster_encryption_config_kms_key_*` variables below
# configure that key) — confirm in main.tf.
variable "cluster_encryption_config_kms_key_id" {
  type        = string
  description = "KMS Key ID to use for cluster encryption config"
  default     = ""
}

variable "cluster_encryption_config_kms_key_enable_key_rotation" {
  type        = bool
  description = "Cluster Encryption Config KMS Key Resource argument - enable kms key rotation"
  default     = true
}

variable "cluster_encryption_config_kms_key_deletion_window_in_days" {
  type        = number
  description = "Cluster Encryption Config KMS Key Resource argument - key deletion windows in days post destruction"
  default     = 10
}

variable "cluster_encryption_config_kms_key_policy" {
  type        = string
  description = "Cluster Encryption Config KMS Key Resource argument - key policy"
  default     = null
}

variable "cluster_encryption_config_resources" {
  type        = list(any)
  description = "Cluster Encryption Config Resources to encrypt, e.g. ['secrets']"
  default     = ["secrets"]
}

# Applied to all IAM roles the module creates (see description).
variable "permissions_boundary" {
  type        = string
  description = "If provided, all IAM roles will be created with this permissions boundary attached"
  default     = null
}

variable "cloudwatch_log_group_kms_key_id" {
  type        = string
  description = "If provided, the KMS Key ID to use to encrypt AWS CloudWatch logs"
  default     = null
}

variable "cloudwatch_log_group_class" {
  type        = string
  description = "Specified the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`"
  default     = null
}
163 |
# Only `addon_name` is required; every other attribute falls back to the
# `optional()` default declared on it.
variable "addons" {
  type = list(object({
    addon_name           = string
    addon_version        = optional(string, null)
    configuration_values = optional(string, null)
    # resolve_conflicts is deprecated, but we keep it for backwards compatibility
    # and because if not declared, Terraform will silently ignore it.
    resolve_conflicts           = optional(string, null)
    resolve_conflicts_on_create = optional(string, null)
    resolve_conflicts_on_update = optional(string, null)
    service_account_role_arn    = optional(string, null)
    create_timeout              = optional(string, null)
    update_timeout              = optional(string, null)
    delete_timeout              = optional(string, null)
    additional_tags             = optional(map(string), {})
  }))
  description = <<-EOT
    Manages [`aws_eks_addon`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) resources.
    Note: `resolve_conflicts` is deprecated. If `resolve_conflicts` is set and
    `resolve_conflicts_on_create` or `resolve_conflicts_on_update` is not set,
    `resolve_conflicts` will be used instead. If `resolve_conflicts_on_create` is
    not set and `resolve_conflicts` is `PRESERVE`, `resolve_conflicts_on_create`
    will be set to `NONE`.
    If `additional_tags` are specified, they are added to the standard resource tags.
    EOT
  default     = []
}
191 |
# Typed `any` so callers can pass any resource or module output purely to
# establish ordering — NOTE(review): presumed the value itself is never read;
# confirm in main.tf.
variable "addons_depends_on" {
  type = any
  description = <<-EOT
    If provided, all addons will depend on this object, and therefore not be installed until this object is finalized.
    This is useful if you want to ensure that addons are not applied before some other condition is met, e.g. node groups are created.
    See [issue #170](https://github.com/cloudposse/terraform-aws-eks-cluster/issues/170) for more details.
    EOT
  default = null
}
201 |
# Attribute order normalized (type, description, default) for consistency
# with every other variable declaration in this file.
# NOTE(review): `null` presumably omits the argument so the AWS provider's
# own default applies — confirm in main.tf.
variable "bootstrap_self_managed_addons_enabled" {
  type        = bool
  description = "Manages bootstrap of default networking addons after cluster has been created"
  default     = null
}
207 |
variable "upgrade_policy" {
  type = object({
    support_type = optional(string, null)
  })
  description = "Configuration block for the support policy to use for the cluster"
  default     = null
}

variable "zonal_shift_config" {
  type = object({
    enabled = optional(bool, null)
  })
  description = "Configuration block with zonal shift configuration for the cluster"
  default     = null
}

# Appended to the label module's ID to form names such as
# "<namespace>-<stage>-<name>-cluster" (see description).
variable "cluster_attributes" {
  type        = list(string)
  description = "Override label module default cluster attributes"
  default     = ["cluster"]
}
229 |
variable "access_config" {
  type = object({
    authentication_mode                         = optional(string, "API")
    bootstrap_cluster_creator_admin_permissions = optional(bool, false)
  })
  description = "Access configuration for the EKS cluster."
  default     = {}
  nullable    = false

  validation {
    # Reject the CONFIG_MAP authentication mode outright; only API-based
    # modes are supported by this module.
    condition     = var.access_config.authentication_mode != "CONFIG_MAP"
    error_message = "The CONFIG_MAP authentication_mode is not supported."
  }
}
244 |
# Fix: description contained a duplicated word ("is is translated").
variable "access_entry_map" {
  type = map(object({
    # key is principal_arn
    user_name = optional(string)
    # Cannot assign "system:*" groups to IAM users, use ClusterAdmin and Admin instead
    kubernetes_groups = optional(list(string), [])
    type              = optional(string, "STANDARD")
    access_policy_associations = optional(map(object({
      # key is policy_arn or policy_name
      access_scope = optional(object({
        type       = optional(string, "cluster")
        namespaces = optional(list(string))
      }), {}) # access_scope
    })), {}) # access_policy_associations
  })) # access_entry_map
  description = <<-EOT
    Map of IAM Principal ARNs to access configuration.
    Preferred over other inputs as this configuration remains stable
    when elements are added or removed, but it requires that the Principal ARNs
    and Policy ARNs are known at plan time.
    Can be used along with other `access_*` inputs, but do not duplicate entries.
    Map `access_policy_associations` keys are policy ARNs, policy
    full name (AmazonEKSViewPolicy), or short name (View).
    It is recommended to use the default `user_name` because the default includes
    IAM role or user name and the session name for assumed roles.
    As a special case in support of backwards compatibility, membership in the
    `system:masters` group is translated to an association with the ClusterAdmin policy.
    In all other cases, including any `system:*` group in `kubernetes_groups` is prohibited.
    EOT
  default     = {}
  nullable    = false
}
277 |
# Fix: "IAM principles" -> "IAM principals" in both descriptions below.
variable "access_entries" {
  type = list(object({
    principal_arn     = string
    user_name         = optional(string, null)
    kubernetes_groups = optional(list(string), null)
  }))
  description = <<-EOT
    List of IAM principals to allow to access the EKS cluster.
    It is recommended to use the default `user_name` because the default includes
    the IAM role or user name and the session name for assumed roles.
    Use when Principal ARN is not known at plan time.
    EOT
  default     = []
  nullable    = false
}

variable "access_policy_associations" {
  type = list(object({
    principal_arn = string
    policy_arn    = string
    access_scope = optional(object({
      type       = optional(string, "cluster")
      namespaces = optional(list(string))
    }), {})
  }))
  description = <<-EOT
    List of AWS managed EKS access policies to associate with IAM principals.
    Use when Principal ARN or Policy ARN is not known at plan time.
    `policy_arn` can be the full ARN, the full name (AmazonEKSViewPolicy) or short name (View).
    EOT
  default     = []
  nullable    = false
}
311 |
variable "access_entries_for_nodes" {
  # We use a map instead of an object because if a user supplies
  # an object with an unexpected key, Terraform simply ignores it,
  # leaving us with no way to detect the error.
  type = map(list(string))
  description = <<-EOT
    Map of list of IAM roles for the EKS non-managed worker nodes.
    The map key is the node type, either `EC2_LINUX` or `EC2_WINDOWS`,
    and the list contains the IAM roles of the nodes of that type.
    There is no need for or utility in creating Fargate access entries, as those
    are always created automatically by AWS, just as with managed nodes.
    Use when Principal ARN is not known at plan time.
    EOT
  default  = {}
  nullable = false
  # Reject any key other than the two supported node types, and name the
  # offending keys in the error message.
  validation {
    condition     = length([for k in keys(var.access_entries_for_nodes) : k if !contains(["EC2_LINUX", "EC2_WINDOWS"], k)]) == 0
    error_message = format(<<-EOS
      The access_entries_for_nodes object can only contain the EC2_LINUX and EC2_WINDOWS attributes:
      Keys "%s" not allowed.
      EOS
    , join("\", \"", [for k in keys(var.access_entries_for_nodes) : k if !contains(["EC2_LINUX", "EC2_WINDOWS"], k)]))
  }
  # FARGATE_LINUX gets its own, more specific error message (see below)
  # rather than the generic "keys not allowed" one above.
  validation {
    condition     = !(contains(keys(var.access_entries_for_nodes), "FARGATE_LINUX"))
    error_message = <<-EOM
      Access entries of type "FARGATE_LINUX" are not supported because they are
      automatically created by AWS EKS and should not be managed by Terraform.
      EOM
  }
}
343 |
## Limited support for modifying the EKS-managed Security Group
## In the future, even this limited support may be removed

# NOTE(review): presumably gates the rules driven by `allowed_security_group_ids`,
# `allowed_cidr_blocks`, and `custom_ingress_rules` below — confirm in
# security-group.tf.
variable "managed_security_group_rules_enabled" {
  type        = bool
  description = "Flag to enable/disable the ingress and egress rules for the EKS managed Security Group"
  default     = true
}
352 |
variable "allowed_security_group_ids" {
  type        = list(string)
  default     = []
  description = <<-EOT
    A list of IDs of Security Groups to allow access to the cluster.
    EOT
}

# NOTE(review): the plan-time length requirement presumably exists because
# each CIDR becomes its own rule resource instance — confirm in security-group.tf.
variable "allowed_cidr_blocks" {
  type        = list(string)
  default     = []
  description = <<-EOT
    A list of IPv4 CIDRs to allow access to the cluster.
    The length of this list must be known at "plan" time.
    EOT
}
369 |
# Fix: the description was a truncated sentence ("...security group rules that").
variable "custom_ingress_rules" {
  type = list(object({
    description              = string
    from_port                = number
    to_port                  = number
    protocol                 = string
    source_security_group_id = string
  }))
  default     = []
  description = <<-EOT
    A list of objects describing custom ingress rules (description, port range,
    protocol, and source Security Group ID) to add to the EKS-managed Security Group.
    EOT
}
383 |
--------------------------------------------------------------------------------
/versions.tf:
--------------------------------------------------------------------------------
terraform {
  # 1.3.0+ is required for the `optional(type, default)` object attribute
  # syntax used throughout variables.tf.
  required_version = ">= 1.3.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.74.0"
    }
    tls = {
      source = "hashicorp/tls"
      # 4.0.0 is explicitly excluded — NOTE(review): presumably a known-bad
      # release; confirm before relaxing this constraint.
      version = ">= 3.1.0, != 4.0.0"
    }
  }
}
15 |
--------------------------------------------------------------------------------