├── .editorconfig
├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── bug_report.yml
│ ├── config.yml
│ ├── feature_request.md
│ ├── feature_request.yml
│ └── question.md
├── PULL_REQUEST_TEMPLATE.md
├── banner.png
├── mergify.yml
├── renovate.json
├── settings.yml
└── workflows
│ ├── branch.yml
│ ├── chatops.yml
│ ├── release.yml
│ └── scheduled.yml
├── .gitignore
├── LICENSE
├── README.md
├── README.yaml
├── atmos.yaml
├── context.tf
├── docs
└── migration-0.7.x-0.8.x+.md
├── examples
└── complete
│ ├── context.tf
│ ├── fixtures.us-east-2.tfvars
│ ├── main.tf
│ ├── outputs.tf
│ ├── security-group-variables.tf
│ ├── variables.tf
│ └── versions.tf
├── main.tf
├── outputs.tf
├── security-group-variables.tf
├── test
├── .gitignore
├── Makefile
├── Makefile.alpine
└── src
│ ├── .gitignore
│ ├── Makefile
│ ├── examples_complete_test.go
│ ├── go.mod
│ ├── go.sum
│ └── utils.go
├── variables.tf
└── versions.tf
/.editorconfig:
--------------------------------------------------------------------------------
1 | # Unix-style newlines with a newline ending every file
2 | [*]
3 | charset = utf-8
4 | end_of_line = lf
5 | indent_size = 2
6 | indent_style = space
7 | insert_final_newline = true
8 | trim_trailing_whitespace = true
9 |
10 | [*.{tf,tfvars}]
11 | indent_size = 2
12 | indent_style = space
13 |
14 | [*.md]
15 | max_line_length = 0
16 | trim_trailing_whitespace = false
17 |
18 | # Override for Makefile
19 | [{Makefile,makefile,GNUmakefile,Makefile.*}]
20 | tab_width = 2
21 | indent_style = tab
22 | indent_size = 2
23 |
24 | [COMMIT_EDITMSG]
25 | max_line_length = 0
26 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # Use this file to define individuals or teams that are responsible for code in a repository.
2 | # Read more: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
3 | #
4 | # Order is important: the last matching pattern has the highest precedence
5 |
6 | # These owners will be the default owners for everything
7 | * @cloudposse/engineering @cloudposse/contributors
8 |
9 | # Cloud Posse must review any changes to Makefiles
10 | **/Makefile @cloudposse/engineering
11 | **/Makefile.* @cloudposse/engineering
12 |
13 | # Cloud Posse must review any changes to GitHub actions
14 | .github/* @cloudposse/engineering
15 |
16 | # Cloud Posse must review any changes to standard context definition,
17 | # but some changes can be rubber-stamped.
18 | **/*.tf @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
19 | README.yaml @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
20 | README.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
21 | docs/*.md @cloudposse/engineering @cloudposse/contributors @cloudposse/approvers
22 |
23 | # Cloud Posse Admins must review all changes to CODEOWNERS or the mergify configuration
24 | .github/mergify.yml @cloudposse/admins
25 | .github/CODEOWNERS @cloudposse/admins
26 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: 'bug'
6 | assignees: ''
7 |
8 | ---
9 |
10 | Found a bug? Maybe our [Slack Community](https://slack.cloudposse.com) can help.
11 |
12 | [](https://slack.cloudposse.com)
13 |
14 | ## Describe the Bug
15 | A clear and concise description of what the bug is.
16 |
17 | ## Expected Behavior
18 | A clear and concise description of what you expected to happen.
19 |
20 | ## Steps to Reproduce
21 | Steps to reproduce the behavior:
22 | 1. Go to '...'
23 | 2. Run '....'
24 | 3. Enter '....'
25 | 4. See error
26 |
27 | ## Screenshots
28 | If applicable, add screenshots or logs to help explain your problem.
29 |
30 | ## Environment (please complete the following information):
31 |
32 | Anything that will help us triage the bug will help. Here are some ideas:
33 | - OS: [e.g. Linux, OSX, WSL, etc]
34 | - Version [e.g. 10.15]
35 |
36 | ## Additional Context
37 | Add any other context about the problem here.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | description: Create a report to help us improve
4 | labels: ["bug"]
5 | assignees: []
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Found a bug?
11 |
12 | Please checkout our [Slack Community](https://slack.cloudposse.com)
13 | or visit our [Slack Archive](https://archive.sweetops.com/).
14 |
15 | [](https://slack.cloudposse.com)
16 |
17 | - type: textarea
18 | id: concise-description
19 | attributes:
20 | label: Describe the Bug
21 | description: A clear and concise description of what the bug is.
22 | placeholder: What is the bug about?
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | id: expected
28 | attributes:
29 | label: Expected Behavior
30 | description: A clear and concise description of what you expected.
31 | placeholder: What did you expect to happen?
32 | validations:
33 | required: true
34 |
35 | - type: textarea
36 | id: reproduction-steps
37 | attributes:
38 | label: Steps to Reproduce
39 | description: Steps to reproduce the behavior.
40 | placeholder: How do we reproduce it?
41 | validations:
42 | required: true
43 |
44 | - type: textarea
45 | id: screenshots
46 | attributes:
47 | label: Screenshots
48 | description: If applicable, add screenshots or logs to help explain.
49 | validations:
50 | required: false
51 |
52 | - type: textarea
53 | id: environment
54 | attributes:
55 | label: Environment
56 | description: Anything that will help us triage the bug.
57 | placeholder: |
58 | - OS: [e.g. Linux, OSX, WSL, etc]
59 | - Version [e.g. 10.15]
60 | - Module version
61 | - Terraform version
62 | validations:
63 | required: false
64 |
65 | - type: textarea
66 | id: additional
67 | attributes:
68 | label: Additional Context
69 | description: |
70 | Add any other context about the problem here.
71 | validations:
72 | required: false
73 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 |
3 | contact_links:
4 |
5 | - name: Community Slack Team
6 | url: https://cloudposse.com/slack/
7 | about: |-
8 | Please ask and answer questions here.
9 |
10 | - name: Office Hours
11 | url: https://cloudposse.com/office-hours/
12 | about: |-
13 | Join us every Wednesday for FREE Office Hours (lunch & learn).
14 |
15 | - name: DevOps Accelerator Program
16 | url: https://cloudposse.com/accelerate/
17 | about: |-
18 | Own your infrastructure in record time. We build it. You drive it.
19 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: 'feature request'
6 | assignees: ''
7 |
8 | ---
9 |
10 | Have a question? Please checkout our [Slack Community](https://slack.cloudposse.com) or visit our [Slack Archive](https://archive.sweetops.com/).
11 |
12 | [](https://slack.cloudposse.com)
13 |
14 | ## Describe the Feature
15 |
16 | A clear and concise description of what the feature is.
17 |
18 | ## Expected Behavior
19 |
20 | A clear and concise description of what you expected to happen.
21 |
22 | ## Use Case
23 |
24 | Is your feature request related to a problem/challenge you are trying to solve? Please provide some additional context of why this feature or capability will be valuable.
25 |
26 | ## Describe Ideal Solution
27 |
28 | A clear and concise description of what you want to happen. If you don't know, that's okay.
29 |
30 | ## Alternatives Considered
31 |
32 | Explain what alternative solutions or features you've considered.
33 |
34 | ## Additional Context
35 |
36 | Add any other context or screenshots about the feature request here.
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | description: Suggest an idea for this project
4 | labels: ["feature request"]
5 | assignees: []
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | Have a question?
11 |
12 | Please checkout our [Slack Community](https://slack.cloudposse.com)
13 | or visit our [Slack Archive](https://archive.sweetops.com/).
14 |
15 | [](https://slack.cloudposse.com)
16 |
17 | - type: textarea
18 | id: concise-description
19 | attributes:
20 | label: Describe the Feature
21 | description: A clear and concise description of what the feature is.
22 | placeholder: What is the feature about?
23 | validations:
24 | required: true
25 |
26 | - type: textarea
27 | id: expected
28 | attributes:
29 | label: Expected Behavior
30 | description: A clear and concise description of what you expected.
31 | placeholder: What did you expect to happen?
32 | validations:
33 | required: true
34 |
35 | - type: textarea
36 | id: use-case
37 | attributes:
38 | label: Use Case
39 | description: |
40 | Is your feature request related to a problem/challenge you are trying
41 | to solve?
42 |
43 | Please provide some additional context of why this feature or
44 | capability will be valuable.
45 | validations:
46 | required: true
47 |
48 | - type: textarea
49 | id: ideal-solution
50 | attributes:
51 | label: Describe Ideal Solution
52 | description: A clear and concise description of what you want to happen.
53 | validations:
54 | required: true
55 |
56 | - type: textarea
57 | id: alternatives-considered
58 | attributes:
59 | label: Alternatives Considered
60 | description: Explain alternative solutions or features considered.
61 | validations:
62 | required: false
63 |
64 | - type: textarea
65 | id: additional
66 | attributes:
67 | label: Additional Context
68 | description: |
69 | Add any other context about the problem here.
70 | validations:
71 | required: false
72 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/6ca7c0bca0f4e21ce58d44f14004c3cd6239ab31/.github/ISSUE_TEMPLATE/question.md
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## what
2 |
3 |
7 |
8 | ## why
9 |
10 |
15 |
16 | ## references
17 |
18 |
22 |
--------------------------------------------------------------------------------
/.github/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/6ca7c0bca0f4e21ce58d44f14004c3cd6239ab31/.github/banner.png
--------------------------------------------------------------------------------
/.github/mergify.yml:
--------------------------------------------------------------------------------
1 | extends: .github
2 |
--------------------------------------------------------------------------------
/.github/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "config:base",
4 | ":preserveSemverRanges"
5 | ],
6 | "baseBranches": ["main", "master", "/^release\\/v\\d{1,2}$/"],
7 | "labels": ["auto-update"],
8 | "dependencyDashboardAutoclose": true,
9 | "enabledManagers": ["terraform"],
10 | "terraform": {
11 | "ignorePaths": ["**/context.tf", "examples/**"]
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/.github/settings.yml:
--------------------------------------------------------------------------------
1 | # Upstream changes from _extends are only recognized when modifications are made to this file in the default branch.
2 | _extends: .github
3 | repository:
4 | name: terraform-aws-msk-apache-kafka-cluster
5 | description: Terraform module to provision AWS MSK
6 | homepage: https://cloudposse.com/accelerate
7 | topics: ""
8 |
9 |
10 |
--------------------------------------------------------------------------------
/.github/workflows/branch.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Branch
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | - release/**
8 | types: [opened, synchronize, reopened, labeled, unlabeled]
9 | push:
10 | branches:
11 | - main
12 | - release/v*
13 | paths-ignore:
14 | - '.github/**'
15 | - 'docs/**'
16 | - 'examples/**'
17 | - 'test/**'
18 | - 'README.md'
19 |
20 | permissions: {}
21 |
22 | jobs:
23 | terraform-module:
24 | uses: cloudposse/.github/.github/workflows/shared-terraform-module.yml@main
25 | secrets: inherit
26 |
--------------------------------------------------------------------------------
/.github/workflows/chatops.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: chatops
3 | on:
4 | issue_comment:
5 | types: [created]
6 |
7 | permissions:
8 | pull-requests: write
9 | id-token: write
10 | contents: write
11 | statuses: write
12 |
13 | jobs:
14 | test:
15 | uses: cloudposse/.github/.github/workflows/shared-terraform-chatops.yml@main
16 | if: ${{ github.event.issue.pull_request && contains(github.event.comment.body, '/terratest') }}
17 | secrets: inherit
18 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: release
3 | on:
4 | release:
5 | types:
6 | - published
7 |
8 | permissions:
9 | id-token: write
10 | contents: write
11 | pull-requests: write
12 |
13 | jobs:
14 | terraform-module:
15 | uses: cloudposse/.github/.github/workflows/shared-release-branches.yml@main
16 | secrets: inherit
17 |
--------------------------------------------------------------------------------
/.github/workflows/scheduled.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: scheduled
3 | on:
4 | workflow_dispatch: { } # Allows manually triggering this workflow
5 | schedule:
6 | - cron: "0 3 * * *"
7 |
8 | permissions:
9 | pull-requests: write
10 | id-token: write
11 | contents: write
12 |
13 | jobs:
14 | scheduled:
15 | uses: cloudposse/.github/.github/workflows/shared-terraform-scheduled.yml@main
16 | secrets: inherit
17 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 | .terraform
8 | .terraform.tfstate.lock.info
9 | .terraform.lock.hcl
10 |
11 | **/.idea
12 | **/*.iml
13 |
14 | # Cloud Posse Build Harness https://github.com/cloudposse/build-harness
15 | **/.build-harness
16 | **/build-harness
17 |
18 | # Crash log files
19 | crash.log
20 | test.log
21 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2020-2023 Cloud Posse, LLC
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 
5 |
6 | 


7 |
8 |
9 |
29 |
30 | Terraform module to provision [Amazon Managed Streaming](https://aws.amazon.com/msk/) for [Apache Kafka](https://aws.amazon.com/msk/what-is-kafka/)
31 |
32 | __Note:__ this module is intended for use with an existing VPC.
33 | To create a new VPC, use [terraform-aws-vpc](https://github.com/cloudposse/terraform-aws-vpc) module.
34 |
35 | **NOTE**: Release `0.8.0` contains breaking changes that will result in the destruction of your existing MSK cluster.
36 | To preserve the original cluster, follow the instructions in the [0.7.x to 0.8.x+ migration path](./docs/migration-0.7.x-0.8.x+.md).
37 |
38 |
39 | > [!TIP]
40 | > #### 👽 Use Atmos with Terraform
41 | > Cloud Posse uses [`atmos`](https://atmos.tools) to easily orchestrate multiple environments using Terraform.
42 | > Works with [Github Actions](https://atmos.tools/integrations/github-actions/), [Atlantis](https://atmos.tools/integrations/atlantis), or [Spacelift](https://atmos.tools/integrations/spacelift).
43 | >
44 | >
45 | > Watch demo of using Atmos with Terraform
46 | > 
47 | > Example of running atmos
to manage infrastructure from our Quick Start tutorial.
48 | >
49 |
50 |
51 |
52 |
53 |
54 | ## Usage
55 |
56 | Here's how to invoke this example module in your projects
57 |
58 | ```hcl
59 | module "kafka" {
60 | source = "cloudposse/msk-apache-kafka-cluster/aws"
61 | # Cloud Posse recommends pinning every module to a specific version
62 | # version = "x.x.x"
63 |
64 | kafka_version = "3.3.2"
65 | namespace = "eg"
66 | stage = "prod"
67 | name = "app"
68 | vpc_id = "vpc-XXXXXXXX"
69 | subnet_ids = ["subnet-XXXXXXXXX", "subnet-YYYYYYYY"]
70 | broker_per_zone = 2
71 | broker_instance_type = "kafka.m5.large"
72 |
73 | # A list of IDs of Security Groups to associate the created resource with, in addition to the created security group
74 | associated_security_group_ids = ["sg-XXXXXXXXX", "sg-YYYYYYYY"]
75 |
76 | # A list of IDs of Security Groups to allow access to the cluster
77 | allowed_security_group_ids = ["sg-XXXXXXXXX", "sg-YYYYYYYY"]
78 | }
79 | ```
80 |
81 | > [!IMPORTANT]
82 | > In Cloud Posse's examples, we avoid pinning modules to specific versions to prevent discrepancies between the documentation
83 | > and the latest released versions. However, for your own projects, we strongly advise pinning each module to the exact version
84 | > you're using. This practice ensures the stability of your infrastructure. Additionally, we recommend implementing a systematic
85 | > approach for updating versions to avoid unexpected changes.
86 |
87 |
88 |
89 |
90 |
91 | ## Examples
92 |
93 | Here is an example of using this module:
94 | - [`examples/complete`](https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/) - complete example of using this module
95 |
96 |
97 |
98 |
99 |
100 | ## Requirements
101 |
102 | | Name | Version |
103 | |------|---------|
104 | | [terraform](#requirement\_terraform) | >= 1.0.0 |
105 | | [aws](#requirement\_aws) | >= 4.0 |
106 |
107 | ## Providers
108 |
109 | | Name | Version |
110 | |------|---------|
111 | | [aws](#provider\_aws) | >= 4.0 |
112 |
113 | ## Modules
114 |
115 | | Name | Source | Version |
116 | |------|--------|---------|
117 | | [hostname](#module\_hostname) | cloudposse/route53-cluster-hostname/aws | 0.13.0 |
118 | | [security\_group](#module\_security\_group) | cloudposse/security-group/aws | 2.2.0 |
119 | | [this](#module\_this) | cloudposse/label/null | 0.25.0 |
120 |
121 | ## Resources
122 |
123 | | Name | Type |
124 | |------|------|
125 | | [aws_appautoscaling_policy.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/appautoscaling_policy) | resource |
126 | | [aws_appautoscaling_target.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/appautoscaling_target) | resource |
127 | | [aws_msk_cluster.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster) | resource |
128 | | [aws_msk_configuration.config](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_configuration) | resource |
129 | | [aws_msk_scram_secret_association.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_scram_secret_association) | resource |
130 | | [aws_msk_broker_nodes.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/msk_broker_nodes) | data source |
131 |
132 | ## Inputs
133 |
134 | | Name | Description | Type | Default | Required |
135 | |------|-------------|------|---------|:--------:|
136 | | [additional\_security\_group\_rules](#input\_additional\_security\_group\_rules) | A list of Security Group rule objects to add to the created security group, in addition to the ones
this module normally creates. (To suppress the module's rules, set `create_security_group` to false
and supply your own security group(s) via `associated_security_group_ids`.)
The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except
for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time.
For more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule
and https://github.com/cloudposse/terraform-aws-security-group. | `list(any)` | `[]` | no |
137 | | [additional\_tag\_map](#input\_additional\_tag\_map) | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
This is for some rare cases where resources want additional configuration of tags
and therefore take a list of maps with tag key, value, and additional configuration. | `map(string)` | `{}` | no |
138 | | [allow\_all\_egress](#input\_allow\_all\_egress) | If `true`, the created security group will allow egress on all ports and protocols to all IP addresses.
If this is false and no egress rules are otherwise specified, then no egress will be allowed. | `bool` | `true` | no |
139 | | [allowed\_cidr\_blocks](#input\_allowed\_cidr\_blocks) | A list of IPv4 CIDRs to allow access to the security group created by this module.
The length of this list must be known at "plan" time. | `list(string)` | `[]` | no |
140 | | [allowed\_security\_group\_ids](#input\_allowed\_security\_group\_ids) | A list of IDs of Security Groups to allow access to the security group created by this module.
The length of this list must be known at "plan" time. | `list(string)` | `[]` | no |
141 | | [associated\_security\_group\_ids](#input\_associated\_security\_group\_ids) | A list of IDs of Security Groups to associate the created resource with, in addition to the created security group.
These security groups will not be modified and, if `create_security_group` is `false`, must have rules providing the desired access. | `list(string)` | `[]` | no |
142 | | [attributes](#input\_attributes) | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
in the order they appear in the list. New attributes are appended to the
end of the list. The elements of the list are joined by the `delimiter`
and treated as a single ID element. | `list(string)` | `[]` | no |
143 | | [autoscaling\_enabled](#input\_autoscaling\_enabled) | To automatically expand your cluster's storage in response to increased usage, you can enable this. [More info](https://docs.aws.amazon.com/msk/latest/developerguide/msk-autoexpand.html) | `bool` | `true` | no |
144 | | [broker\_dns\_records\_count](#input\_broker\_dns\_records\_count) | This variable specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable.
This corresponds to the total number of broker endpoints created by the module.
Calculate this number by multiplying the `broker_per_zone` variable by the subnet count.
This variable is necessary to prevent the Terraform error:
The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. | `number` | `0` | no |
145 | | [broker\_instance\_type](#input\_broker\_instance\_type) | The instance type to use for the Kafka brokers | `string` | n/a | yes |
146 | | [broker\_per\_zone](#input\_broker\_per\_zone) | Number of Kafka brokers per zone | `number` | `1` | no |
147 | | [broker\_volume\_size](#input\_broker\_volume\_size) | The size in GiB of the EBS volume for the data drive on each broker node | `number` | `1000` | no |
148 | | [certificate\_authority\_arns](#input\_certificate\_authority\_arns) | List of ACM Certificate Authority Amazon Resource Names (ARNs) to be used for TLS client authentication | `list(string)` | `[]` | no |
149 | | [client\_allow\_unauthenticated](#input\_client\_allow\_unauthenticated) | Enable unauthenticated access | `bool` | `false` | no |
150 | | [client\_broker](#input\_client\_broker) | Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT` | `string` | `"TLS"` | no |
151 | | [client\_sasl\_iam\_enabled](#input\_client\_sasl\_iam\_enabled) | Enable client authentication via IAM policies. Cannot be set to `true` at the same time as `client_tls_auth_enabled` | `bool` | `false` | no |
152 | | [client\_sasl\_scram\_enabled](#input\_client\_sasl\_scram\_enabled) | Enable SCRAM client authentication via AWS Secrets Manager. Cannot be set to `true` at the same time as `client_tls_auth_enabled` | `bool` | `false` | no |
153 | | [client\_sasl\_scram\_secret\_association\_arns](#input\_client\_sasl\_scram\_secret\_association\_arns) | List of AWS Secrets Manager secret ARNs for SCRAM authentication | `list(string)` | `[]` | no |
154 | | [client\_sasl\_scram\_secret\_association\_enabled](#input\_client\_sasl\_scram\_secret\_association\_enabled) | Enable the list of AWS Secrets Manager secret ARNs for SCRAM authentication | `bool` | `true` | no |
155 | | [client\_tls\_auth\_enabled](#input\_client\_tls\_auth\_enabled) | Set `true` to enable the Client TLS Authentication | `bool` | `false` | no |
156 | | [cloudwatch\_logs\_enabled](#input\_cloudwatch\_logs\_enabled) | Indicates whether you want to enable or disable streaming broker logs to Cloudwatch Logs | `bool` | `false` | no |
157 | | [cloudwatch\_logs\_log\_group](#input\_cloudwatch\_logs\_log\_group) | Name of the Cloudwatch Log Group to deliver logs to | `string` | `null` | no |
158 | | [context](#input\_context) | Single object for setting entire context at once.
See description of individual variables for details.
Leave string and numeric variables as `null` to use default value.
Individual variable settings (non-null) override settings in context object,
except for attributes, tags, and additional\_tag\_map, which are merged. | `any` | {
"additional_tag_map": {},
"attributes": [],
"delimiter": null,
"descriptor_formats": {},
"enabled": true,
"environment": null,
"id_length_limit": null,
"label_key_case": null,
"label_order": [],
"label_value_case": null,
"labels_as_tags": [
"unset"
],
"name": null,
"namespace": null,
"regex_replace_chars": null,
"stage": null,
"tags": {},
"tenant": null
}
| no |
159 | | [create\_security\_group](#input\_create\_security\_group) | Set `true` to create and configure a new security group. If false, `associated_security_group_ids` must be provided. | `bool` | `true` | no |
160 | | [custom\_broker\_dns\_name](#input\_custom\_broker\_dns\_name) | Custom Route53 DNS hostname for MSK brokers. Use `%%ID%%` key to specify brokers index in the hostname. Example: `kafka-broker%%ID%%.example.com` | `string` | `null` | no |
161 | | [delimiter](#input\_delimiter) | Delimiter to be used between ID elements.
Defaults to `-` (hyphen). Set to `""` to use no delimiter at all. | `string` | `null` | no |
162 | | [descriptor\_formats](#input\_descriptor\_formats) | Describe additional descriptors to be output in the `descriptors` output map.
Map of maps. Keys are names of descriptors. Values are maps of the form
`{
format = string
labels = list(string)
}`
(Type is `any` so the map values can later be enhanced to provide additional options.)
`format` is a Terraform format string to be passed to the `format()` function.
`labels` is a list of labels, in order, to pass to `format()` function.
Label values will be normalized before being passed to `format()` so they will be
identical to how they appear in `id`.
Default is `{}` (`descriptors` output will be empty). | `any` | `{}` | no |
163 | | [enabled](#input\_enabled) | Set to false to prevent the module from creating any resources | `bool` | `null` | no |
164 | | [encryption\_at\_rest\_kms\_key\_arn](#input\_encryption\_at\_rest\_kms\_key\_arn) | You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest | `string` | `""` | no |
165 | | [encryption\_in\_cluster](#input\_encryption\_in\_cluster) | Whether data communication among broker nodes is encrypted | `bool` | `true` | no |
166 | | [enhanced\_monitoring](#input\_enhanced\_monitoring) | Specify the desired enhanced MSK CloudWatch monitoring level. Valid values: `DEFAULT`, `PER_BROKER`, and `PER_TOPIC_PER_BROKER` | `string` | `"DEFAULT"` | no |
167 | | [environment](#input\_environment) | ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT' | `string` | `null` | no |
168 | | [firehose\_delivery\_stream](#input\_firehose\_delivery\_stream) | Name of the Kinesis Data Firehose delivery stream to deliver logs to | `string` | `""` | no |
169 | | [firehose\_logs\_enabled](#input\_firehose\_logs\_enabled) | Indicates whether you want to enable or disable streaming broker logs to Kinesis Data Firehose | `bool` | `false` | no |
170 | | [id\_length\_limit](#input\_id\_length\_limit) | Limit `id` to this many characters (minimum 6).
Set to `0` for unlimited length.
Set to `null` to keep the existing setting, which defaults to `0`.
Does not affect `id_full`. | `number` | `null` | no |
171 | | [inline\_rules\_enabled](#input\_inline\_rules\_enabled) | NOT RECOMMENDED. Create rules "inline" instead of as separate `aws_security_group_rule` resources.
See [#20046](https://github.com/hashicorp/terraform-provider-aws/issues/20046) for one of several issues with inline rules.
See [this post](https://github.com/hashicorp/terraform-provider-aws/pull/9032#issuecomment-639545250) for details on the difference between inline rules and rule resources. | `bool` | `false` | no |
172 | | [jmx\_exporter\_enabled](#input\_jmx\_exporter\_enabled) | Set `true` to enable the JMX Exporter | `bool` | `false` | no |
173 | | [kafka\_version](#input\_kafka\_version) | The desired Kafka software version.
Refer to https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html for more details | `string` | n/a | yes |
174 | | [label\_key\_case](#input\_label\_key\_case) | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
Does not affect keys of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper`.
Default value: `title`. | `string` | `null` | no |
175 | | [label\_order](#input\_label\_order) | The order in which the labels (ID elements) appear in the `id`.
Defaults to ["namespace", "environment", "stage", "name", "attributes"].
You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present. | `list(string)` | `null` | no |
176 | | [label\_value\_case](#input\_label\_value\_case) | Controls the letter case of ID elements (labels) as included in `id`,
set as tag values, and output by this module individually.
Does not affect values of tags passed in via the `tags` input.
Possible values: `lower`, `title`, `upper` and `none` (no transformation).
Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
Default value: `lower`. | `string` | `null` | no |
177 | | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` | [
"default"
]
| no |
178 | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no |
179 | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no |
180 | | [node\_exporter\_enabled](#input\_node\_exporter\_enabled) | Set `true` to enable the Node Exporter | `bool` | `false` | no |
181 | | [preserve\_security\_group\_id](#input\_preserve\_security\_group\_id) | When `false` and `security_group_create_before_destroy` is `true`, changes to security group rules
cause a new security group to be created with the new rules, and the existing security group is then
replaced with the new one, eliminating any service interruption.
When `true` or when changing the value (from `false` to `true` or from `true` to `false`),
existing security group rules will be deleted before new ones are created, resulting in a service interruption,
but preserving the security group itself.
**NOTE:** Setting this to `true` does not guarantee the security group will never be replaced,
it only keeps changes to the security group rules from triggering a replacement.
See the [terraform-aws-security-group README](https://github.com/cloudposse/terraform-aws-security-group) for further discussion. | `bool` | `false` | no |
182 | | [properties](#input\_properties) | Contents of the server.properties file. Supported properties are documented in the [MSK Developer Guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-configuration-properties.html) | `map(string)` | `{}` | no |
183 | | [public\_access\_enabled](#input\_public\_access\_enabled) | Enable public access to MSK cluster (given that all of the requirements are met) | `bool` | `false` | no |
184 | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no |
185 | | [s3\_logs\_bucket](#input\_s3\_logs\_bucket) | Name of the S3 bucket to deliver logs to | `string` | `""` | no |
186 | | [s3\_logs\_enabled](#input\_s3\_logs\_enabled) | Indicates whether you want to enable or disable streaming broker logs to S3 | `bool` | `false` | no |
187 | | [s3\_logs\_prefix](#input\_s3\_logs\_prefix) | Prefix to append to the S3 folder name logs are delivered to | `string` | `""` | no |
188 | | [security\_group\_create\_before\_destroy](#input\_security\_group\_create\_before\_destroy) | Set `true` to enable terraform `create_before_destroy` behavior on the created security group.
We only recommend setting this `false` if you are importing an existing security group
that you do not want replaced and therefore need full control over its name.
Note that changing this value will always cause the security group to be replaced. | `bool` | `true` | no |
189 | | [security\_group\_create\_timeout](#input\_security\_group\_create\_timeout) | How long to wait for the security group to be created. | `string` | `"10m"` | no |
190 | | [security\_group\_delete\_timeout](#input\_security\_group\_delete\_timeout) | How long to retry on `DependencyViolation` errors during security group deletion from
lingering ENIs left by certain AWS services such as Elastic Load Balancing. | `string` | `"15m"` | no |
191 | | [security\_group\_description](#input\_security\_group\_description) | The description to assign to the created Security Group.
Warning: Changing the description causes the security group to be replaced. | `string` | `"Managed by Terraform"` | no |
192 | | [security\_group\_name](#input\_security\_group\_name) | The name to assign to the created security group. Must be unique within the VPC.
If not provided, will be derived from the `null-label.context` passed in.
If `create_before_destroy` is true, will be used as a name prefix. | `list(string)` | `[]` | no |
193 | | [security\_group\_rule\_description](#input\_security\_group\_rule\_description) | The description to place on each security group rule. The %s will be replaced with the protocol name | `string` | `"Allow inbound %s traffic"` | no |
194 | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no |
195 | | [storage\_autoscaling\_disable\_scale\_in](#input\_storage\_autoscaling\_disable\_scale\_in) | If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource | `bool` | `false` | no |
196 | | [storage\_autoscaling\_max\_capacity](#input\_storage\_autoscaling\_max\_capacity) | Maximum size the autoscaling policy can scale storage. Defaults to `broker_volume_size` | `number` | `null` | no |
197 | | [storage\_autoscaling\_target\_value](#input\_storage\_autoscaling\_target\_value) | Percentage of storage used to trigger autoscaled storage increase | `number` | `60` | no |
198 | | [subnet\_ids](#input\_subnet\_ids) | Subnet IDs for Client Broker | `list(string)` | n/a | yes |
199 | | [tags](#input\_tags) | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
Neither the tag keys nor the tag values will be modified by this module. | `map(string)` | `{}` | no |
200 | | [tenant](#input\_tenant) | ID element \_(Rarely used, not included by default)\_. A customer identifier, indicating who this instance of a resource is for | `string` | `null` | no |
201 | | [vpc\_id](#input\_vpc\_id) | The ID of the VPC where the Security Group will be created. | `string` | n/a | yes |
202 | | [zone\_id](#input\_zone\_id) | Route53 DNS Zone ID for MSK broker hostnames | `string` | `null` | no |
203 |
204 | ## Outputs
205 |
206 | | Name | Description |
207 | |------|-------------|
208 | | [bootstrap\_brokers](#output\_bootstrap\_brokers) | Comma separated list of one or more hostname:port pairs of Kafka brokers suitable to bootstrap connectivity to the Kafka cluster |
209 | | [bootstrap\_brokers\_public\_sasl\_iam](#output\_bootstrap\_brokers\_public\_sasl\_iam) | Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for public access to the Kafka cluster using SASL/IAM |
210 | | [bootstrap\_brokers\_public\_sasl\_scram](#output\_bootstrap\_brokers\_public\_sasl\_scram) | Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for public access to the Kafka cluster using SASL/SCRAM |
211 | | [bootstrap\_brokers\_public\_tls](#output\_bootstrap\_brokers\_public\_tls) | Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for public access to the Kafka cluster using TLS |
212 | | [bootstrap\_brokers\_sasl\_iam](#output\_bootstrap\_brokers\_sasl\_iam) | Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for access to the Kafka cluster using SASL/IAM |
213 | | [bootstrap\_brokers\_sasl\_scram](#output\_bootstrap\_brokers\_sasl\_scram) | Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for access to the Kafka cluster using SASL/SCRAM |
214 | | [bootstrap\_brokers\_tls](#output\_bootstrap\_brokers\_tls) | Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for access to the Kafka cluster using TLS |
215 | | [broker\_endpoints](#output\_broker\_endpoints) | List of broker endpoints |
216 | | [cluster\_arn](#output\_cluster\_arn) | Amazon Resource Name (ARN) of the MSK cluster |
217 | | [cluster\_name](#output\_cluster\_name) | MSK Cluster name |
218 | | [config\_arn](#output\_config\_arn) | Amazon Resource Name (ARN) of the MSK configuration |
219 | | [current\_version](#output\_current\_version) | Current version of the MSK Cluster |
220 | | [hostnames](#output\_hostnames) | List of MSK Cluster broker DNS hostnames |
221 | | [latest\_revision](#output\_latest\_revision) | Latest revision of the MSK configuration |
222 | | [security\_group\_arn](#output\_security\_group\_arn) | The ARN of the created security group |
223 | | [security\_group\_id](#output\_security\_group\_id) | The ID of the created security group |
224 | | [security\_group\_name](#output\_security\_group\_name) | n/a |
225 | | [storage\_mode](#output\_storage\_mode) | Storage mode for supported storage tiers |
226 | | [zookeeper\_connect\_string](#output\_zookeeper\_connect\_string) | Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster |
227 | | [zookeeper\_connect\_string\_tls](#output\_zookeeper\_connect\_string\_tls) | Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster via TLS |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 | ## Related Projects
237 |
238 | Check out these related projects.
239 |
240 | - [terraform-null-label](https://github.com/cloudposse/terraform-null-label) - Terraform module designed to generate consistent names and tags for resources. Use terraform-null-label to implement a strict naming convention.
241 | - [terraform-aws-route53-cluster-hostname](https://github.com/cloudposse/terraform-aws-route53-cluster-hostname) - Terraform module to define a consistent AWS Route53 hostname
242 | - [terraform-aws-vpc](https://github.com/cloudposse/terraform-aws-vpc) - Terraform module to provision a VPC with Internet Gateway.
243 |
244 |
245 | ## References
246 |
247 | For additional context, refer to some of these links.
248 |
249 | - [Terraform Standard Module Structure](https://www.terraform.io/docs/modules/index.html#standard-module-structure) - HashiCorp's standard module structure is a file and directory layout we recommend for reusable modules distributed in separate repositories.
250 | - [Terraform Module Requirements](https://www.terraform.io/docs/registry/modules/publish.html#requirements) - HashiCorp's guidance on all the requirements for publishing a module. Meeting the requirements for publishing a module is extremely easy.
251 | - [Terraform `random_integer` Resource](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/integer) - The resource random_integer generates random values from a given range, described by the min and max attributes of a given resource.
252 | - [Terraform Version Pinning](https://www.terraform.io/docs/configuration/terraform.html#specifying-a-required-terraform-version) - The required_version setting can be used to constrain which versions of the Terraform CLI can be used with your configuration
253 |
254 |
255 |
256 | > [!TIP]
257 | > #### Use Terraform Reference Architectures for AWS
258 | >
259 | > Use Cloud Posse's ready-to-go [terraform architecture blueprints](https://cloudposse.com/reference-architecture/) for AWS to get up and running quickly.
260 | >
261 | > ✅ We build it together with your team.
262 | > ✅ Your team owns everything.
263 | > ✅ 100% Open Source and backed by fanatical support.
264 | >
265 | >
266 | > 📚 Learn More
267 | >
268 | >
269 | >
270 | > Cloud Posse is the leading [**DevOps Accelerator**](https://cpco.io/commercial-support?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-msk-apache-kafka-cluster&utm_content=commercial_support) for funded startups and enterprises.
271 | >
272 | > *Your team can operate like a pro today.*
273 | >
274 | > Ensure that your team succeeds by using Cloud Posse's proven process and turnkey blueprints. Plus, we stick around until you succeed.
275 | > #### Day-0: Your Foundation for Success
276 | > - **Reference Architecture.** You'll get everything you need from the ground up built using 100% infrastructure as code.
277 | > - **Deployment Strategy.** Adopt a proven deployment strategy with GitHub Actions, enabling automated, repeatable, and reliable software releases.
278 | > - **Site Reliability Engineering.** Gain total visibility into your applications and services with Datadog, ensuring high availability and performance.
279 | > - **Security Baseline.** Establish a secure environment from the start, with built-in governance, accountability, and comprehensive audit logs, safeguarding your operations.
280 | > - **GitOps.** Empower your team to manage infrastructure changes confidently and efficiently through Pull Requests, leveraging the full power of GitHub Actions.
281 | >
282 | >
283 | >
284 | > #### Day-2: Your Operational Mastery
285 | > - **Training.** Equip your team with the knowledge and skills to confidently manage the infrastructure, ensuring long-term success and self-sufficiency.
> - **Support.** Benefit from seamless communication over Slack with our experts, ensuring you have the support you need, whenever you need it.
287 | > - **Troubleshooting.** Access expert assistance to quickly resolve any operational challenges, minimizing downtime and maintaining business continuity.
288 | > - **Code Reviews.** Enhance your team’s code quality with our expert feedback, fostering continuous improvement and collaboration.
289 | > - **Bug Fixes.** Rely on our team to troubleshoot and resolve any issues, ensuring your systems run smoothly.
290 | > - **Migration Assistance.** Accelerate your migration process with our dedicated support, minimizing disruption and speeding up time-to-value.
291 | > - **Customer Workshops.** Engage with our team in weekly workshops, gaining insights and strategies to continuously improve and innovate.
292 | >
293 | >
294 | >
295 |
296 | ## ✨ Contributing
297 |
298 | This project is under active development, and we encourage contributions from our community.
299 |
300 |
301 |
302 | Many thanks to our outstanding contributors:
303 |
304 |
305 |
306 |
307 |
308 | For 🐛 bug reports & feature requests, please use the [issue tracker](https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/issues).
309 |
310 | In general, PRs are welcome. We follow the typical "fork-and-pull" Git workflow.
311 | 1. Review our [Code of Conduct](https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/?tab=coc-ov-file#code-of-conduct) and [Contributor Guidelines](https://github.com/cloudposse/.github/blob/main/CONTRIBUTING.md).
312 | 2. **Fork** the repo on GitHub
313 | 3. **Clone** the project to your own machine
314 | 4. **Commit** changes to your own branch
315 | 5. **Push** your work back up to your fork
316 | 6. Submit a **Pull Request** so that we can review your changes
317 |
318 | **NOTE:** Be sure to merge the latest changes from "upstream" before making a pull request!
319 |
320 | ### 🌎 Slack Community
321 |
322 | Join our [Open Source Community](https://cpco.io/slack?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-msk-apache-kafka-cluster&utm_content=slack) on Slack. It's **FREE** for everyone! Our "SweetOps" community is where you get to talk with others who share a similar vision for how to rollout and manage infrastructure. This is the best place to talk shop, ask questions, solicit feedback, and work together as a community to build totally *sweet* infrastructure.
323 |
324 | ### 📰 Newsletter
325 |
326 | Sign up for [our newsletter](https://cpco.io/newsletter?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-msk-apache-kafka-cluster&utm_content=newsletter) and join 3,000+ DevOps engineers, CTOs, and founders who get insider access to the latest DevOps trends, so you can always stay in the know.
327 | Dropped straight into your Inbox every week — and usually a 5-minute read.
328 |
329 | ### 📆 Office Hours
330 |
331 | [Join us every Wednesday via Zoom](https://cloudposse.com/office-hours?utm_source=github&utm_medium=readme&utm_campaign=cloudposse/terraform-aws-msk-apache-kafka-cluster&utm_content=office_hours) for your weekly dose of insider DevOps trends, AWS news and Terraform insights, all sourced from our SweetOps community, plus a _live Q&A_ that you can’t find anywhere else.
It's **FREE** for everyone!

## License
334 |
335 |
336 |
337 |
338 | Preamble to the Apache License, Version 2.0
339 |
340 |
341 |
342 | Complete license is available in the [`LICENSE`](LICENSE) file.
343 |
344 | ```text
345 | Licensed to the Apache Software Foundation (ASF) under one
346 | or more contributor license agreements. See the NOTICE file
347 | distributed with this work for additional information
348 | regarding copyright ownership. The ASF licenses this file
349 | to you under the Apache License, Version 2.0 (the
350 | "License"); you may not use this file except in compliance
351 | with the License. You may obtain a copy of the License at
352 |
353 | https://www.apache.org/licenses/LICENSE-2.0
354 |
355 | Unless required by applicable law or agreed to in writing,
356 | software distributed under the License is distributed on an
357 | "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
358 | KIND, either express or implied. See the License for the
359 | specific language governing permissions and limitations
360 | under the License.
361 | ```
362 |
363 |
364 | ## Trademarks
365 |
366 | All other trademarks referenced herein are the property of their respective owners.
367 |
368 |
369 | ## Copyrights
370 |
371 | Copyright © 2020-2025 [Cloud Posse, LLC](https://cloudposse.com)
372 |
373 |
374 |
375 |
376 |
377 |
378 |
--------------------------------------------------------------------------------
/README.yaml:
--------------------------------------------------------------------------------
1 | #
2 | # This is the canonical configuration for the `README.md`
3 | # Run `make readme` to rebuild the `README.md`
4 | #
5 |
6 | # Name of this project
7 | name: terraform-aws-msk-apache-kafka-cluster
8 |
9 | # Logo for this project
10 | #logo: docs/logo.png
11 |
12 | # License of this project
13 | license: "APACHE2"
14 |
15 | # Copyrights
16 | copyrights:
17 | - name: "Cloud Posse, LLC"
18 | url: "https://cloudposse.com"
19 | year: "2020"
20 |
21 | # Canonical GitHub repo
22 | github_repo: cloudposse/terraform-aws-msk-apache-kafka-cluster
23 |
24 | # Badges to display
25 | badges:
26 | - name: Latest Release
27 | image: https://img.shields.io/github/release/cloudposse/terraform-aws-msk-apache-kafka-cluster.svg?style=for-the-badge
28 | url: https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/releases/latest
29 | - name: Last Updated
30 | image: https://img.shields.io/github/last-commit/cloudposse/terraform-aws-msk-apache-kafka-cluster.svg?style=for-the-badge
31 | url: https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/commits
32 | - name: Slack Community
33 | image: https://slack.cloudposse.com/for-the-badge.svg
34 | url: https://cloudposse.com/slack
35 |
36 | # List any related terraform modules that this module may be used with or that this module depends on.
37 | related:
38 | - name: "terraform-null-label"
39 | description: "Terraform module designed to generate consistent names and tags for resources. Use terraform-null-label to implement a strict naming convention."
40 | url: "https://github.com/cloudposse/terraform-null-label"
41 | - name: "terraform-aws-route53-cluster-hostname"
42 | description: "Terraform module to define a consistent AWS Route53 hostname"
43 | url: "https://github.com/cloudposse/terraform-aws-route53-cluster-hostname"
44 | - name: "terraform-aws-vpc"
45 | description: "Terraform module to provision a VPC with Internet Gateway."
46 | url: "https://github.com/cloudposse/terraform-aws-vpc"
47 |
48 | # List any resources helpful for someone to get started. For example, link to the hashicorp documentation or AWS documentation.
49 | references:
50 | - name: "Terraform Standard Module Structure"
51 | description: "HashiCorp's standard module structure is a file and directory layout we recommend for reusable modules distributed in separate repositories."
52 | url: "https://www.terraform.io/docs/modules/index.html#standard-module-structure"
53 | - name: "Terraform Module Requirements"
54 | description: "HashiCorp's guidance on all the requirements for publishing a module. Meeting the requirements for publishing a module is extremely easy."
55 | url: "https://www.terraform.io/docs/registry/modules/publish.html#requirements"
56 | - name: "Terraform `random_integer` Resource"
57 | description: "The resource random_integer generates random values from a given range, described by the min and max attributes of a given resource."
58 | url: "https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/integer"
59 | - name: "Terraform Version Pinning"
60 | description: "The required_version setting can be used to constrain which versions of the Terraform CLI can be used with your configuration"
61 | url: "https://www.terraform.io/docs/configuration/terraform.html#specifying-a-required-terraform-version"
62 |
63 | # Short description of this project
64 | description: |-
65 | Terraform module to provision [Amazon Managed Streaming](https://aws.amazon.com/msk/) for [Apache Kafka](https://aws.amazon.com/msk/what-is-kafka/)
66 |
67 | __Note:__ this module is intended for use with an existing VPC.
68 | To create a new VPC, use [terraform-aws-vpc](https://github.com/cloudposse/terraform-aws-vpc) module.
69 |
70 | **NOTE**: Release `0.8.0` contains breaking changes that will result in the destruction of your existing MSK cluster.
71 | To preserve the original cluster, follow the instructions in the [0.7.x to 0.8.x+ migration path](./docs/migration-0.7.x-0.8.x+.md).
72 |
73 | # Introduction to the project
74 | #introduction: |-
75 | # This is an introduction.
76 |
77 | # How to use this module. Should be an easy example to copy and paste.
78 | usage: |-
79 | Here's how to invoke this example module in your projects
80 |
81 | ```hcl
82 | module "kafka" {
83 | source = "cloudposse/msk-apache-kafka-cluster/aws"
84 | # Cloud Posse recommends pinning every module to a specific version
85 | # version = "x.x.x"
86 |
87 | kafka_version = "3.3.2"
88 | namespace = "eg"
89 | stage = "prod"
90 | name = "app"
91 | vpc_id = "vpc-XXXXXXXX"
92 | subnet_ids = ["subnet-XXXXXXXXX", "subnet-YYYYYYYY"]
93 | broker_per_zone = 2
94 | broker_instance_type = "kafka.m5.large"
95 |
96 | # A list of IDs of Security Groups to associate the created resource with, in addition to the created security group
97 | associated_security_group_ids = ["sg-XXXXXXXXX", "sg-YYYYYYYY"]
98 |
99 | # A list of IDs of Security Groups to allow access to the cluster
100 | allowed_security_group_ids = ["sg-XXXXXXXXX", "sg-YYYYYYYY"]
101 | }
102 | ```
103 |
104 | # Example usage
105 | examples: |-
106 | Here is an example of using this module:
107 | - [`examples/complete`](https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/) - complete example of using this module
108 |
109 | include: []
110 | contributors: []
111 |
--------------------------------------------------------------------------------
/atmos.yaml:
--------------------------------------------------------------------------------
1 | # Atmos Configuration — powered by https://atmos.tools
2 | #
3 | # This configuration enables centralized, DRY, and consistent project scaffolding using Atmos.
4 | #
5 | # Included features:
6 | # - Organizational custom commands: https://atmos.tools/core-concepts/custom-commands
7 | # - Automated README generation: https://atmos.tools/cli/commands/docs/generate
8 | #
9 |
10 | # Import shared configuration used by all modules
11 | import:
12 | - https://raw.githubusercontent.com/cloudposse/.github/refs/heads/main/.github/atmos/terraform-module.yaml
13 |
--------------------------------------------------------------------------------
/context.tf:
--------------------------------------------------------------------------------
1 | #
2 | # ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label
3 | # All other instances of this file should be a copy of that one
4 | #
5 | #
6 | # Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf
7 | # and then place it in your Terraform module to automatically get
8 | # Cloud Posse's standard configuration inputs suitable for passing
9 | # to Cloud Posse modules.
10 | #
11 | # curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf
12 | #
13 | # Modules should access the whole context as `module.this.context`
14 | # to get the input variables with nulls for defaults,
15 | # for example `context = module.this.context`,
16 | # and access individual variables as `module.this.`,
17 | # with final values filled in.
18 | #
19 | # For example, when using defaults, `module.this.context.delimiter`
20 | # will be null, and `module.this.delimiter` will be `-` (hyphen).
21 | #
22 |
23 | module "this" {
24 | source = "cloudposse/label/null"
25 | version = "0.25.0" # requires Terraform >= 0.13.0
26 |
27 | enabled = var.enabled
28 | namespace = var.namespace
29 | tenant = var.tenant
30 | environment = var.environment
31 | stage = var.stage
32 | name = var.name
33 | delimiter = var.delimiter
34 | attributes = var.attributes
35 | tags = var.tags
36 | additional_tag_map = var.additional_tag_map
37 | label_order = var.label_order
38 | regex_replace_chars = var.regex_replace_chars
39 | id_length_limit = var.id_length_limit
40 | label_key_case = var.label_key_case
41 | label_value_case = var.label_value_case
42 | descriptor_formats = var.descriptor_formats
43 | labels_as_tags = var.labels_as_tags
44 |
45 | context = var.context
46 | }
47 |
48 | # Copy contents of cloudposse/terraform-null-label/variables.tf here
49 |
50 | variable "context" {
51 | type = any
52 | default = {
53 | enabled = true
54 | namespace = null
55 | tenant = null
56 | environment = null
57 | stage = null
58 | name = null
59 | delimiter = null
60 | attributes = []
61 | tags = {}
62 | additional_tag_map = {}
63 | regex_replace_chars = null
64 | label_order = []
65 | id_length_limit = null
66 | label_key_case = null
67 | label_value_case = null
68 | descriptor_formats = {}
69 | # Note: we have to use [] instead of null for unset lists due to
70 | # https://github.com/hashicorp/terraform/issues/28137
71 | # which was not fixed until Terraform 1.0.0,
72 | # but we want the default to be all the labels in `label_order`
73 | # and we want users to be able to prevent all tag generation
74 | # by setting `labels_as_tags` to `[]`, so we need
75 | # a different sentinel to indicate "default"
76 | labels_as_tags = ["unset"]
77 | }
78 | description = <<-EOT
79 | Single object for setting entire context at once.
80 | See description of individual variables for details.
81 | Leave string and numeric variables as `null` to use default value.
82 | Individual variable settings (non-null) override settings in context object,
83 | except for attributes, tags, and additional_tag_map, which are merged.
84 | EOT
85 |
86 | validation {
87 | condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
88 | error_message = "Allowed values: `lower`, `title`, `upper`."
89 | }
90 |
91 | validation {
92 | condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
93 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
94 | }
95 | }
96 |
97 | variable "enabled" {
98 | type = bool
99 | default = null
100 | description = "Set to false to prevent the module from creating any resources"
101 | }
102 |
103 | variable "namespace" {
104 | type = string
105 | default = null
106 | description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique"
107 | }
108 |
109 | variable "tenant" {
110 | type = string
111 | default = null
112 | description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for"
113 | }
114 |
115 | variable "environment" {
116 | type = string
117 | default = null
118 | description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'"
119 | }
120 |
121 | variable "stage" {
122 | type = string
123 | default = null
124 | description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'"
125 | }
126 |
127 | variable "name" {
128 | type = string
129 | default = null
130 | description = <<-EOT
131 | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
132 | This is the only ID element not also included as a `tag`.
133 | The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
134 | EOT
135 | }
136 |
137 | variable "delimiter" {
138 | type = string
139 | default = null
140 | description = <<-EOT
141 | Delimiter to be used between ID elements.
142 | Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
143 | EOT
144 | }
145 |
146 | variable "attributes" {
147 | type = list(string)
148 | default = []
149 | description = <<-EOT
150 | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
151 | in the order they appear in the list. New attributes are appended to the
152 | end of the list. The elements of the list are joined by the `delimiter`
153 | and treated as a single ID element.
154 | EOT
155 | }
156 |
157 | variable "labels_as_tags" {
158 | type = set(string)
159 | default = ["default"]
160 | description = <<-EOT
161 | Set of labels (ID elements) to include as tags in the `tags` output.
162 | Default is to include all labels.
163 | Tags with empty values will not be included in the `tags` output.
164 | Set to `[]` to suppress all generated tags.
165 | **Notes:**
166 | The value of the `name` tag, if included, will be the `id`, not the `name`.
167 | Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
168 | changed in later chained modules. Attempts to change it will be silently ignored.
169 | EOT
170 | }
171 |
172 | variable "tags" {
173 | type = map(string)
174 | default = {}
175 | description = <<-EOT
176 | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
177 | Neither the tag keys nor the tag values will be modified by this module.
178 | EOT
179 | }
180 |
181 | variable "additional_tag_map" {
182 | type = map(string)
183 | default = {}
184 | description = <<-EOT
185 | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
186 | This is for some rare cases where resources want additional configuration of tags
187 | and therefore take a list of maps with tag key, value, and additional configuration.
188 | EOT
189 | }
190 |
191 | variable "label_order" {
192 | type = list(string)
193 | default = null
194 | description = <<-EOT
195 | The order in which the labels (ID elements) appear in the `id`.
196 | Defaults to ["namespace", "environment", "stage", "name", "attributes"].
197 | You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present.
198 | EOT
199 | }
200 |
201 | variable "regex_replace_chars" {
202 | type = string
203 | default = null
204 | description = <<-EOT
205 | Terraform regular expression (regex) string.
206 | Characters matching the regex will be removed from the ID elements.
207 | If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
208 | EOT
209 | }
210 |
211 | variable "id_length_limit" {
212 | type = number
213 | default = null
214 | description = <<-EOT
215 | Limit `id` to this many characters (minimum 6).
216 | Set to `0` for unlimited length.
217 | Set to `null` for keep the existing setting, which defaults to `0`.
218 | Does not affect `id_full`.
219 | EOT
220 | validation {
221 | condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
222 | error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
223 | }
224 | }
225 |
226 | variable "label_key_case" {
227 | type = string
228 | default = null
229 | description = <<-EOT
230 | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
231 | Does not affect keys of tags passed in via the `tags` input.
232 | Possible values: `lower`, `title`, `upper`.
233 | Default value: `title`.
234 | EOT
235 |
236 | validation {
237 | condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
238 | error_message = "Allowed values: `lower`, `title`, `upper`."
239 | }
240 | }
241 |
242 | variable "label_value_case" {
243 | type = string
244 | default = null
245 | description = <<-EOT
246 | Controls the letter case of ID elements (labels) as included in `id`,
247 | set as tag values, and output by this module individually.
248 | Does not affect values of tags passed in via the `tags` input.
249 | Possible values: `lower`, `title`, `upper` and `none` (no transformation).
250 | Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
251 | Default value: `lower`.
252 | EOT
253 |
254 | validation {
255 | condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
256 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
257 | }
258 | }
259 |
260 | variable "descriptor_formats" {
261 | type = any
262 | default = {}
263 | description = <<-EOT
264 | Describe additional descriptors to be output in the `descriptors` output map.
265 | Map of maps. Keys are names of descriptors. Values are maps of the form
266 | `{
267 | format = string
268 | labels = list(string)
269 | }`
270 | (Type is `any` so the map values can later be enhanced to provide additional options.)
271 | `format` is a Terraform format string to be passed to the `format()` function.
272 | `labels` is a list of labels, in order, to pass to `format()` function.
273 | Label values will be normalized before being passed to `format()` so they will be
274 | identical to how they appear in `id`.
275 | Default is `{}` (`descriptors` output will be empty).
276 | EOT
277 | }
278 |
279 | #### End of copy of cloudposse/terraform-null-label/variables.tf
280 |
--------------------------------------------------------------------------------
/docs/migration-0.7.x-0.8.x+.md:
--------------------------------------------------------------------------------
1 | # Migration from 0.7.x to 0.8.x
2 |
3 | Version `0.8.0` of this module introduces breaking changes that, without taking additional precautions, will cause the MSK
4 | cluster to be recreated.
5 |
6 | This is because version `0.8.0` relies on the [terraform-aws-security-group](https://github.com/cloudposse/terraform-aws-security-group)
7 | module for managing the broker security group. This changes the Terraform resource address for the Security Group, which will
8 | [cause Terraform to recreate the SG](https://github.com/hashicorp/terraform-provider-aws/blob/3988f0c55ad6eb33c2b4c660312df9a4be4586b9/internal/service/kafka/cluster.go#L90-L97).
9 |
10 | To circumvent this, after bumping the module version to `0.8.0` (or above), run a plan to retrieve the resource addresses of
11 | the SG that Terraform would like to destroy, and the resource address of the SG which Terraform would like to create.
12 |
13 | First, make sure that the following variable is set:
14 |
15 | ```hcl
16 | security_group_description = "Allow inbound traffic from Security Groups and CIDRs. Allow all outbound traffic"
17 | ```
18 |
19 | Setting `security_group_description` to its "legacy" value will keep the Security Group — and hence the MSK cluster — from being replaced.
20 |
21 | Finally, change the resource address of the existing Security Group.
22 |
23 | ```bash
24 | $ terraform state mv "...aws_security_group.default[0]" "...module.broker_security_group.aws_security_group.default[0]"
25 | ```
26 |
27 | This will result in an apply that will only destroy SG Rules, but not the Security Group itself or the MSK cluster.
28 |
--------------------------------------------------------------------------------
/examples/complete/context.tf:
--------------------------------------------------------------------------------
1 | #
2 | # ONLY EDIT THIS FILE IN github.com/cloudposse/terraform-null-label
3 | # All other instances of this file should be a copy of that one
4 | #
5 | #
6 | # Copy this file from https://github.com/cloudposse/terraform-null-label/blob/master/exports/context.tf
7 | # and then place it in your Terraform module to automatically get
8 | # Cloud Posse's standard configuration inputs suitable for passing
9 | # to Cloud Posse modules.
10 | #
11 | # curl -sL https://raw.githubusercontent.com/cloudposse/terraform-null-label/master/exports/context.tf -o context.tf
12 | #
13 | # Modules should access the whole context as `module.this.context`
14 | # to get the input variables with nulls for defaults,
15 | # for example `context = module.this.context`,
16 | # and access individual variables as `module.this.`,
17 | # with final values filled in.
18 | #
19 | # For example, when using defaults, `module.this.context.delimiter`
20 | # will be null, and `module.this.delimiter` will be `-` (hyphen).
21 | #
22 |
23 | module "this" {
24 | source = "cloudposse/label/null"
25 | version = "0.25.0" # requires Terraform >= 0.13.0
26 |
27 | enabled = var.enabled
28 | namespace = var.namespace
29 | tenant = var.tenant
30 | environment = var.environment
31 | stage = var.stage
32 | name = var.name
33 | delimiter = var.delimiter
34 | attributes = var.attributes
35 | tags = var.tags
36 | additional_tag_map = var.additional_tag_map
37 | label_order = var.label_order
38 | regex_replace_chars = var.regex_replace_chars
39 | id_length_limit = var.id_length_limit
40 | label_key_case = var.label_key_case
41 | label_value_case = var.label_value_case
42 | descriptor_formats = var.descriptor_formats
43 | labels_as_tags = var.labels_as_tags
44 |
45 | context = var.context
46 | }
47 |
48 | # Copy contents of cloudposse/terraform-null-label/variables.tf here
49 |
50 | variable "context" {
51 | type = any
52 | default = {
53 | enabled = true
54 | namespace = null
55 | tenant = null
56 | environment = null
57 | stage = null
58 | name = null
59 | delimiter = null
60 | attributes = []
61 | tags = {}
62 | additional_tag_map = {}
63 | regex_replace_chars = null
64 | label_order = []
65 | id_length_limit = null
66 | label_key_case = null
67 | label_value_case = null
68 | descriptor_formats = {}
69 | # Note: we have to use [] instead of null for unset lists due to
70 | # https://github.com/hashicorp/terraform/issues/28137
71 | # which was not fixed until Terraform 1.0.0,
72 | # but we want the default to be all the labels in `label_order`
73 | # and we want users to be able to prevent all tag generation
74 | # by setting `labels_as_tags` to `[]`, so we need
75 | # a different sentinel to indicate "default"
76 | labels_as_tags = ["unset"]
77 | }
78 | description = <<-EOT
79 | Single object for setting entire context at once.
80 | See description of individual variables for details.
81 | Leave string and numeric variables as `null` to use default value.
82 | Individual variable settings (non-null) override settings in context object,
83 | except for attributes, tags, and additional_tag_map, which are merged.
84 | EOT
85 |
86 | validation {
87 | condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
88 | error_message = "Allowed values: `lower`, `title`, `upper`."
89 | }
90 |
91 | validation {
92 | condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
93 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
94 | }
95 | }
96 |
97 | variable "enabled" {
98 | type = bool
99 | default = null
100 | description = "Set to false to prevent the module from creating any resources"
101 | }
102 |
103 | variable "namespace" {
104 | type = string
105 | default = null
106 | description = "ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique"
107 | }
108 |
109 | variable "tenant" {
110 | type = string
111 | default = null
112 | description = "ID element _(Rarely used, not included by default)_. A customer identifier, indicating who this instance of a resource is for"
113 | }
114 |
115 | variable "environment" {
116 | type = string
117 | default = null
118 | description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2', OR role 'prod', 'staging', 'dev', 'UAT'"
119 | }
120 |
121 | variable "stage" {
122 | type = string
123 | default = null
124 | description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'"
125 | }
126 |
127 | variable "name" {
128 | type = string
129 | default = null
130 | description = <<-EOT
131 | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
132 | This is the only ID element not also included as a `tag`.
133 | The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
134 | EOT
135 | }
136 |
137 | variable "delimiter" {
138 | type = string
139 | default = null
140 | description = <<-EOT
141 | Delimiter to be used between ID elements.
142 | Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
143 | EOT
144 | }
145 |
146 | variable "attributes" {
147 | type = list(string)
148 | default = []
149 | description = <<-EOT
150 | ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
151 | in the order they appear in the list. New attributes are appended to the
152 | end of the list. The elements of the list are joined by the `delimiter`
153 | and treated as a single ID element.
154 | EOT
155 | }
156 |
157 | variable "labels_as_tags" {
158 | type = set(string)
159 | default = ["default"]
160 | description = <<-EOT
161 | Set of labels (ID elements) to include as tags in the `tags` output.
162 | Default is to include all labels.
163 | Tags with empty values will not be included in the `tags` output.
164 | Set to `[]` to suppress all generated tags.
165 | **Notes:**
166 | The value of the `name` tag, if included, will be the `id`, not the `name`.
167 | Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
168 | changed in later chained modules. Attempts to change it will be silently ignored.
169 | EOT
170 | }
171 |
172 | variable "tags" {
173 | type = map(string)
174 | default = {}
175 | description = <<-EOT
176 | Additional tags (e.g. `{'BusinessUnit': 'XYZ'}`).
177 | Neither the tag keys nor the tag values will be modified by this module.
178 | EOT
179 | }
180 |
181 | variable "additional_tag_map" {
182 | type = map(string)
183 | default = {}
184 | description = <<-EOT
185 | Additional key-value pairs to add to each map in `tags_as_list_of_maps`. Not added to `tags` or `id`.
186 | This is for some rare cases where resources want additional configuration of tags
187 | and therefore take a list of maps with tag key, value, and additional configuration.
188 | EOT
189 | }
190 |
191 | variable "label_order" {
192 | type = list(string)
193 | default = null
194 | description = <<-EOT
195 | The order in which the labels (ID elements) appear in the `id`.
196 | Defaults to ["namespace", "environment", "stage", "name", "attributes"].
197 | You can omit any of the 6 labels ("tenant" is the 6th), but at least one must be present.
198 | EOT
199 | }
200 |
201 | variable "regex_replace_chars" {
202 | type = string
203 | default = null
204 | description = <<-EOT
205 | Terraform regular expression (regex) string.
206 | Characters matching the regex will be removed from the ID elements.
207 | If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
208 | EOT
209 | }
210 |
211 | variable "id_length_limit" {
212 | type = number
213 | default = null
214 | description = <<-EOT
215 | Limit `id` to this many characters (minimum 6).
216 | Set to `0` for unlimited length.
217 | Set to `null` for keep the existing setting, which defaults to `0`.
218 | Does not affect `id_full`.
219 | EOT
220 | validation {
221 | condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
222 | error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
223 | }
224 | }
225 |
226 | variable "label_key_case" {
227 | type = string
228 | default = null
229 | description = <<-EOT
230 | Controls the letter case of the `tags` keys (label names) for tags generated by this module.
231 | Does not affect keys of tags passed in via the `tags` input.
232 | Possible values: `lower`, `title`, `upper`.
233 | Default value: `title`.
234 | EOT
235 |
236 | validation {
237 | condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
238 | error_message = "Allowed values: `lower`, `title`, `upper`."
239 | }
240 | }
241 |
242 | variable "label_value_case" {
243 | type = string
244 | default = null
245 | description = <<-EOT
246 | Controls the letter case of ID elements (labels) as included in `id`,
247 | set as tag values, and output by this module individually.
248 | Does not affect values of tags passed in via the `tags` input.
249 | Possible values: `lower`, `title`, `upper` and `none` (no transformation).
250 | Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
251 | Default value: `lower`.
252 | EOT
253 |
254 | validation {
255 | condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
256 | error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
257 | }
258 | }
259 |
260 | variable "descriptor_formats" {
261 | type = any
262 | default = {}
263 | description = <<-EOT
264 | Describe additional descriptors to be output in the `descriptors` output map.
265 | Map of maps. Keys are names of descriptors. Values are maps of the form
266 | `{
267 | format = string
268 | labels = list(string)
269 | }`
270 | (Type is `any` so the map values can later be enhanced to provide additional options.)
271 | `format` is a Terraform format string to be passed to the `format()` function.
272 | `labels` is a list of labels, in order, to pass to `format()` function.
273 | Label values will be normalized before being passed to `format()` so they will be
274 | identical to how they appear in `id`.
275 | Default is `{}` (`descriptors` output will be empty).
276 | EOT
277 | }
278 |
279 | #### End of copy of cloudposse/terraform-null-label/variables.tf
280 |
--------------------------------------------------------------------------------
/examples/complete/fixtures.us-east-2.tfvars:
--------------------------------------------------------------------------------
1 | region = "us-east-2"
2 |
3 | namespace = "eg"
4 |
5 | environment = "ue2"
6 |
7 | stage = "test"
8 |
9 | name = "msk-test"
10 |
11 | availability_zones = ["us-east-2a", "us-east-2b"]
12 |
13 | # https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html
14 | kafka_version = "3.4.0"
15 |
16 | broker_per_zone = 2
17 |
18 | broker_instance_type = "kafka.t3.small"
19 |
20 | create_security_group = true
21 |
22 | zone_id = "Z3SO0TKDDQ0RGG"
23 |
24 | # This variable specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable.
25 | # This corresponds to the total number of broker endpoints created by the module.
26 | # Calculate this number by multiplying the `broker_per_zone` variable by the subnet count.
27 | # This variable is necessary to prevent the Terraform error:
28 | # The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created.
29 | broker_dns_records_count = 4
30 |
31 | # Unauthenticated cannot be set to `false` without enabling any authentication mechanisms
32 | client_allow_unauthenticated = true
33 |
--------------------------------------------------------------------------------
/examples/complete/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 |
5 | module "vpc" {
6 | source = "cloudposse/vpc/aws"
7 | version = "2.1.0"
8 |
9 | ipv4_primary_cidr_block = "172.16.0.0/16"
10 |
11 | context = module.this.context
12 | }
13 |
14 | module "subnets" {
15 | source = "cloudposse/dynamic-subnets/aws"
16 | version = "2.3.0"
17 |
18 | availability_zones = var.availability_zones
19 | vpc_id = module.vpc.vpc_id
20 | igw_id = [module.vpc.igw_id]
21 | ipv4_cidr_block = [module.vpc.vpc_cidr_block]
22 | nat_gateway_enabled = false
23 | nat_instance_enabled = false
24 |
25 | context = module.this.context
26 | }
27 |
28 | module "kafka" {
29 | source = "../../"
30 |
31 | zone_id = var.zone_id
32 | vpc_id = module.vpc.vpc_id
33 | subnet_ids = module.this.enabled ? module.subnets.private_subnet_ids : [""]
34 | kafka_version = var.kafka_version
35 | broker_per_zone = var.broker_per_zone
36 | broker_instance_type = var.broker_instance_type
37 | public_access_enabled = var.public_access_enabled
38 | broker_dns_records_count = var.broker_dns_records_count
39 |
40 | allowed_security_group_ids = concat(var.allowed_security_group_ids, [module.vpc.vpc_default_security_group_id])
41 | allowed_cidr_blocks = var.allowed_cidr_blocks
42 | associated_security_group_ids = var.associated_security_group_ids
43 | create_security_group = var.create_security_group
44 | security_group_name = var.security_group_name
45 | security_group_description = var.security_group_description
46 | security_group_create_before_destroy = var.security_group_create_before_destroy
47 | preserve_security_group_id = var.preserve_security_group_id
48 | security_group_create_timeout = var.security_group_create_timeout
49 | security_group_delete_timeout = var.security_group_delete_timeout
50 | allow_all_egress = var.allow_all_egress
51 | additional_security_group_rules = var.additional_security_group_rules
52 | inline_rules_enabled = var.inline_rules_enabled
53 |
54 | client_allow_unauthenticated = var.client_allow_unauthenticated
55 | client_sasl_scram_enabled = var.client_sasl_scram_enabled
56 | client_sasl_iam_enabled = var.client_sasl_iam_enabled
57 | client_tls_auth_enabled = var.client_tls_auth_enabled
58 | client_sasl_scram_secret_association_enabled = var.client_sasl_scram_secret_association_enabled
59 | client_sasl_scram_secret_association_arns = var.client_sasl_scram_secret_association_arns
60 |
61 | certificate_authority_arns = var.certificate_authority_arns
62 |
63 | ## Use custom broker DNS name to avoid resource conflict for concurrent test runs
64 | ## `%%ID%%` is the expected placeholder for cluster node number in `custom_broker_dns_name`
65 | custom_broker_dns_name = format("msk-test-broker-%s-%s", var.attributes[0], "%%ID%%")
66 |
67 | context = module.this.context
68 | }
69 |
--------------------------------------------------------------------------------
/examples/complete/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_name" {
2 | value = module.kafka.cluster_name
3 | description = "The cluster name of the MSK cluster"
4 | }
5 |
6 | output "cluster_arn" {
7 | value = module.kafka.cluster_arn
8 | description = "Amazon Resource Name (ARN) of the MSK cluster"
9 | }
10 |
11 | output "config_arn" {
12 | value = module.kafka.config_arn
13 | description = "Amazon Resource Name (ARN) of the MSK configuration"
14 | }
15 |
16 | output "hostnames" {
17 | value = module.kafka.hostnames
18 | description = "List of MSK Cluster broker DNS hostnames"
19 | }
20 |
21 | output "security_group_id" {
22 |   value       = module.kafka.security_group_id
23 |   description = "The ID of the security group created for the MSK cluster"
24 | }
25 |
26 | output "security_group_name" {
27 |   value       = module.kafka.security_group_name
28 |   description = "The name of the security group created for the MSK cluster"
29 | }
30 |
31 | output "security_group_arn" {
32 | value = module.kafka.security_group_arn
33 | description = "The ARN of the created security group"
34 | }
35 |
36 | output "storage_mode" {
37 | value = module.kafka.storage_mode
38 | description = "Storage mode for supported storage tiers"
39 | }
40 |
41 | output "bootstrap_brokers" {
42 | value = module.kafka.bootstrap_brokers
43 | description = "Comma separated list of one or more hostname:port pairs of Kafka brokers suitable to bootstrap connectivity to the Kafka cluster"
44 | }
45 |
46 | output "bootstrap_brokers_tls" {
47 | value = module.kafka.bootstrap_brokers_tls
48 | description = "Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for access to the Kafka cluster using TLS"
49 | }
50 |
51 | output "bootstrap_brokers_public_tls" {
52 | value = module.kafka.bootstrap_brokers_public_tls
53 | description = "Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for public access to the Kafka cluster using TLS"
54 | }
55 |
56 | output "bootstrap_brokers_sasl_scram" {
57 | value = module.kafka.bootstrap_brokers_sasl_scram
58 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for access to the Kafka cluster using SASL/SCRAM"
59 | }
60 |
61 | output "bootstrap_brokers_public_sasl_scram" {
62 | value = module.kafka.bootstrap_brokers_public_sasl_scram
63 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for public access to the Kafka cluster using SASL/SCRAM"
64 | }
65 |
66 | output "bootstrap_brokers_sasl_iam" {
67 | value = module.kafka.bootstrap_brokers_sasl_iam
68 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for access to the Kafka cluster using SASL/IAM"
69 | }
70 |
71 | output "bootstrap_brokers_public_sasl_iam" {
72 | value = module.kafka.bootstrap_brokers_public_sasl_iam
73 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for public access to the Kafka cluster using SASL/IAM"
74 | }
75 |
76 | output "zookeeper_connect_string" {
77 | value = module.kafka.zookeeper_connect_string
78 | description = "Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster"
79 | }
80 |
81 | output "zookeeper_connect_string_tls" {
82 | value = module.kafka.zookeeper_connect_string_tls
83 | description = "Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster via TLS"
84 | }
85 |
86 | output "broker_endpoints" {
87 | value = module.kafka.broker_endpoints
88 | description = "List of broker endpoints"
89 | }
90 |
91 | output "current_version" {
92 | value = module.kafka.current_version
93 | description = "Current version of the MSK Cluster"
94 | }
95 |
96 | output "latest_revision" {
97 | value = module.kafka.latest_revision
98 | description = "Latest revision of the MSK configuration"
99 | }
100 |
--------------------------------------------------------------------------------
/examples/complete/security-group-variables.tf:
--------------------------------------------------------------------------------
1 | # security-group-variables Version: 3
2 | #
3 | # Copy this file from https://github.com/cloudposse/terraform-aws-security-group/blob/master/exports/security-group-variables.tf
4 | # and EDIT IT TO SUIT YOUR PROJECT. Update the version number above if you update this file from a later version.
5 | # Unlike null-label context.tf, this file cannot be automatically updated
6 | # because of the tight integration with the module using it.
7 | ##
8 | # Delete this top comment block, except for the first line (version number),
9 | # REMOVE COMMENTS below that are intended for the initial implementor and not maintainers or end users.
10 | #
11 | # This file provides the standard inputs that all Cloud Posse Open Source
12 | # Terraform modules that create AWS Security Groups should implement.
13 | # This file does NOT provide implementation of the inputs, as that
14 | # of course varies with each module.
15 | #
16 | # This file declares some standard outputs modules should create,
17 | # but the declarations should be moved to `outputs.tf` and of course
18 | # may need to be modified based on the module's use of security-group.
19 | #
20 |
21 |
22 | variable "create_security_group" {
23 | type = bool
24 | description = "Set `true` to create and configure a new security group. If false, `associated_security_group_ids` must be provided."
25 | default = true
26 | }
27 |
28 | variable "associated_security_group_ids" {
29 | type = list(string)
30 | description = <<-EOT
31 | A list of IDs of Security Groups to associate the created resource with, in addition to the created security group.
32 | These security groups will not be modified and, if `create_security_group` is `false`, must have rules providing the desired access.
33 | EOT
34 | default = []
35 | }
36 |
37 | ##
38 | ## allowed_* inputs are optional, because the same thing can be accomplished by
39 | ## providing `additional_security_group_rules`. However, if the rules this
40 | ## module creates are non-trivial (for example, opening ports based on
41 | ## feature settings, see https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/blob/3fe23c402cc420799ae721186812482335f78d24/main.tf#L14-L53 )
42 | ## then it makes sense to include these.
43 | ## Reasons not to include some or all of these inputs include
44 | ## - too hard to implement
45 | ## - does not make sense (particularly the IPv6 inputs if the underlying resource does not yet support IPv6)
46 | ## - likely to confuse users
47 | ## - likely to invite count/for_each issues
48 | variable "allowed_security_group_ids" {
49 | type = list(string)
50 | description = <<-EOT
51 | A list of IDs of Security Groups to allow access to the security group created by this module.
52 | The length of this list must be known at "plan" time.
53 | EOT
54 | default = []
55 | }
56 |
57 | variable "allowed_cidr_blocks" {
58 | type = list(string)
59 | description = <<-EOT
60 | A list of IPv4 CIDRs to allow access to the security group created by this module.
61 | The length of this list must be known at "plan" time.
62 | EOT
63 | default = []
64 | }
65 | ## End of optional allowed_* ###########
66 |
67 | variable "security_group_name" {
68 | type = list(string)
69 | description = <<-EOT
70 | The name to assign to the created security group. Must be unique within the VPC.
71 | If not provided, will be derived from the `null-label.context` passed in.
72 | If `create_before_destroy` is true, will be used as a name prefix.
73 | EOT
74 | default = []
75 | }
76 |
77 | variable "security_group_description" {
78 | type = string
79 | description = <<-EOT
80 | The description to assign to the created Security Group.
81 | Warning: Changing the description causes the security group to be replaced.
82 | EOT
83 | default = "Managed by Terraform"
84 | }
85 |
86 | variable "security_group_create_before_destroy" {
87 | type = bool
88 | description = <<-EOT
89 | Set `true` to enable terraform `create_before_destroy` behavior on the created security group.
90 | We only recommend setting this `false` if you are importing an existing security group
91 | that you do not want replaced and therefore need full control over its name.
92 | Note that changing this value will always cause the security group to be replaced.
93 | EOT
94 | default = true
95 | }
96 |
97 | variable "preserve_security_group_id" {
98 | type = bool
99 | description = <<-EOT
100 | When `false` and `security_group_create_before_destroy` is `true`, changes to security group rules
101 | cause a new security group to be created with the new rules, and the existing security group is then
102 | replaced with the new one, eliminating any service interruption.
103 | When `true` or when changing the value (from `false` to `true` or from `true` to `false`),
104 | existing security group rules will be deleted before new ones are created, resulting in a service interruption,
105 | but preserving the security group itself.
106 | **NOTE:** Setting this to `true` does not guarantee the security group will never be replaced,
107 | it only keeps changes to the security group rules from triggering a replacement.
108 | See the [terraform-aws-security-group README](https://github.com/cloudposse/terraform-aws-security-group) for further discussion.
109 | EOT
110 | default = false
111 | }
112 |
113 | variable "security_group_create_timeout" {
114 | type = string
115 | description = "How long to wait for the security group to be created."
116 | default = "10m"
117 | }
118 |
119 | variable "security_group_delete_timeout" {
120 | type = string
121 | description = <<-EOT
122 | How long to retry on `DependencyViolation` errors during security group deletion from
123 | lingering ENIs left by certain AWS services such as Elastic Load Balancing.
124 | EOT
125 | default = "15m"
126 | }
127 |
128 | variable "allow_all_egress" {
129 | type = bool
130 | description = <<-EOT
131 | If `true`, the created security group will allow egress on all ports and protocols to all IP addresses.
132 | If this is false and no egress rules are otherwise specified, then no egress will be allowed.
133 | EOT
134 | default = true
135 | }
136 |
137 | variable "additional_security_group_rules" {
138 | type = list(any)
139 | description = <<-EOT
140 | A list of Security Group rule objects to add to the created security group, in addition to the ones
141 | this module normally creates. (To suppress the module's rules, set `create_security_group` to false
142 | and supply your own security group(s) via `associated_security_group_ids`.)
143 | The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except
144 | for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time.
145 | For more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule
146 | and https://github.com/cloudposse/terraform-aws-security-group.
147 | EOT
148 | default = []
149 | }
150 |
151 | #### We do not expose an `additional_security_group_rule_matrix` input for a few reasons:
152 | # - It is a convenience and ultimately provides no rules that cannot be provided via `additional_security_group_rules`
153 | # - It is complicated and can, in some situations, create problems for Terraform `for_each`
154 | # - It is difficult to document and easy to make mistakes using it
155 |
156 |
157 | #
158 | #
159 | #### The variables below (but not the outputs) can be omitted if not needed, and may need their descriptions modified
160 | #
161 | #
162 |
163 | #############################################################################################
164 | ## Special note about inline_rules_enabled and revoke_rules_on_delete
165 | ##
166 | ## The security-group inputs inline_rules_enabled and revoke_rules_on_delete should not
167 | ## be exposed in other modules unless there is a strong reason for them to be used.
168 | ## We discourage the use of inline_rules_enabled and we rarely need or want
169 | ## revoke_rules_on_delete, so we do not want to clutter our interface with those inputs.
170 | ##
171 | ## If someone wants to enable either of those options, they have the option
172 | ## of creating a security group configured as they like
173 | ## and passing it in as the target security group.
174 | #############################################################################################
175 |
176 | variable "inline_rules_enabled" {
177 | type = bool
178 | description = <<-EOT
179 | NOT RECOMMENDED. Create rules "inline" instead of as separate `aws_security_group_rule` resources.
180 | See [#20046](https://github.com/hashicorp/terraform-provider-aws/issues/20046) for one of several issues with inline rules.
181 | See [this post](https://github.com/hashicorp/terraform-provider-aws/pull/9032#issuecomment-639545250) for details on the difference between inline rules and rule resources.
182 | EOT
183 | default = false
184 | }
185 |
--------------------------------------------------------------------------------
/examples/complete/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | type = string
3 | description = "AWS region"
4 | nullable = false
5 | }
6 |
7 | variable "availability_zones" {
8 | type = list(string)
9 | description = "List of availability zones for VPC creation"
10 | nullable = false
11 | }
12 |
13 | variable "zone_id" {
14 | type = string
15 | description = "ZoneID for DNS Hostnames of MSK Brokers"
16 | }
17 |
18 | variable "broker_dns_records_count" {
19 | type = number
20 | description = <<-EOT
21 | This variable specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable.
22 | This corresponds to the total number of broker endpoints created by the module.
23 | Calculate this number by multiplying the `broker_per_zone` variable by the subnet count.
24 | This variable is necessary to prevent the Terraform error:
25 | The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created.
26 | EOT
27 | default = 0
28 | nullable = false
29 | }
30 |
31 | variable "kafka_version" {
32 | type = string
33 | description = <<-EOT
34 | The desired Kafka software version.
35 | Refer to https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html for more details
36 | EOT
37 | nullable = false
38 | }
39 |
40 | variable "broker_instance_type" {
41 | type = string
42 | description = "Specify the instance type to use for the kafka brokers"
43 | nullable = false
44 | }
45 |
46 | variable "broker_per_zone" {
47 | type = number
48 | description = "Number of Kafka brokers per zone"
49 | nullable = false
50 | }
51 |
52 | variable "public_access_enabled" {
53 | type = bool
54 | default = false
55 | description = "Enable public access to MSK cluster (given that all of the requirements are met)"
56 | nullable = false
57 | }
58 |
59 | variable "client_allow_unauthenticated" {
60 | type = bool
61 | default = false
62 | description = "Enable unauthenticated access"
63 | nullable = false
64 | }
65 |
66 | variable "client_sasl_iam_enabled" {
67 | type = bool
68 | default = false
69 | description = "Enable client authentication via IAM policies. Cannot be set to `true` at the same time as `client_tls_auth_enabled`"
70 | nullable = false
71 | }
72 |
73 | variable "client_tls_auth_enabled" {
74 | type = bool
75 | default = false
76 | description = "Set `true` to enable the Client TLS Authentication"
77 | nullable = false
78 | }
79 |
80 | variable "client_sasl_scram_enabled" {
81 | type = bool
82 | default = false
83 | description = "Enable SCRAM client authentication via AWS Secrets Manager. Cannot be set to `true` at the same time as `client_tls_auth_enabled`"
84 | nullable = false
85 | }
86 |
87 | variable "certificate_authority_arns" {
88 | type = list(string)
89 | default = []
90 | description = "List of ACM Certificate Authority Amazon Resource Names (ARNs) to be used for TLS client authentication"
91 | nullable = false
92 | }
93 |
94 | variable "client_sasl_scram_secret_association_enabled" {
95 | type = bool
96 | default = true
97 | description = "Enable the list of AWS Secrets Manager secret ARNs for SCRAM authentication"
98 | nullable = false
99 | }
100 |
101 | variable "client_sasl_scram_secret_association_arns" {
102 | type = list(string)
103 | default = []
104 | description = "List of AWS Secrets Manager secret ARNs for SCRAM authentication"
105 | nullable = false
106 | }
107 |
--------------------------------------------------------------------------------
/examples/complete/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.0"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | enabled = module.this.enabled
3 |
4 | broker_endpoints = local.enabled ? flatten(data.aws_msk_broker_nodes.default[0].node_info_list[*].endpoints) : []
5 |
6 | # If var.storage_autoscaling_max_capacity is not set, don't autoscale past current size
7 | broker_volume_size_max = coalesce(var.storage_autoscaling_max_capacity, var.broker_volume_size)
8 |
9 | # var.client_broker types
10 | plaintext = "PLAINTEXT"
11 | tls_plaintext = "TLS_PLAINTEXT"
12 | tls = "TLS"
13 |
14 | # The following ports are not configurable. See: https://docs.aws.amazon.com/msk/latest/developerguide/client-access.html#port-info
15 | protocols = {
16 | plaintext = {
17 | name = "plaintext"
18 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#bootstrap_brokers
19 | enabled = contains([local.plaintext, local.tls_plaintext], var.client_broker)
20 | port = 9092
21 | }
22 | tls = {
23 | name = "TLS"
24 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#bootstrap_brokers_tls
25 | enabled = contains([local.tls_plaintext, local.tls], var.client_broker)
26 | port = 9094
27 | }
28 | sasl_scram = {
29 | name = "SASL/SCRAM"
30 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#bootstrap_brokers_sasl_scram
31 | enabled = var.client_sasl_scram_enabled && contains([local.tls_plaintext, local.tls], var.client_broker)
32 | port = 9096
33 | }
34 | sasl_iam = {
35 | name = "SASL/IAM"
36 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#bootstrap_brokers_sasl_iam
37 | enabled = var.client_sasl_iam_enabled && contains([local.tls_plaintext, local.tls], var.client_broker)
38 | port = 9098
39 | }
40 | # The following two protocols are always enabled.
41 | # See: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#zookeeper_connect_string
42 | # and https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#zookeeper_connect_string_tls
43 | zookeeper_plaintext = {
44 | name = "Zookeeper plaintext"
45 | enabled = true
46 | port = 2181
47 | }
48 | zookeeper_tls = {
49 | name = "Zookeeper TLS"
50 | enabled = true
51 | port = 2182
52 | }
53 | # The following two protocols are enabled on demand of user
54 | # See: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#jmx_exporter
55 | # and https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#node_exporter
56 | # and https://docs.aws.amazon.com/msk/latest/developerguide/open-monitoring.html#set-up-prometheus-host
57 | jmx_exporter = {
58 | name = "JMX Exporter"
59 | enabled = var.jmx_exporter_enabled
60 | port = 11001
61 | }
62 | node_exporter = {
63 | name = "Node Exporter"
64 | enabled = var.node_exporter_enabled
65 | port = 11002
66 | }
67 | }
68 | }
69 |
70 | data "aws_msk_broker_nodes" "default" {
71 | count = local.enabled ? 1 : 0
72 |
73 | cluster_arn = one(aws_msk_cluster.default[*].arn)
74 | }
75 |
76 | # https://github.com/cloudposse/terraform-aws-security-group/blob/master/docs/migration-v1-v2.md
77 | module "security_group" {
78 | source = "cloudposse/security-group/aws"
79 | version = "2.2.0"
80 |
81 | enabled = local.enabled && var.create_security_group
82 |
83 | vpc_id = var.vpc_id
84 |
85 | security_group_name = var.security_group_name
86 | create_before_destroy = var.security_group_create_before_destroy
87 | preserve_security_group_id = var.preserve_security_group_id
88 | security_group_create_timeout = var.security_group_create_timeout
89 | security_group_delete_timeout = var.security_group_delete_timeout
90 | security_group_description = var.security_group_description
91 | allow_all_egress = var.allow_all_egress
92 | rules = var.additional_security_group_rules
93 | inline_rules_enabled = var.inline_rules_enabled
94 |
95 | rule_matrix = [
96 | {
97 | source_security_group_ids = var.allowed_security_group_ids
98 | cidr_blocks = var.allowed_cidr_blocks
99 | rules = [
100 | for protocol_key, protocol in local.protocols : {
101 | key = protocol_key
102 | type = "ingress"
103 | from_port = protocol.port
104 | to_port = protocol.port
105 | protocol = "tcp"
106 | description = format(var.security_group_rule_description, protocol.name)
107 | } if protocol.enabled
108 | ]
109 | }
110 | ]
111 |
112 | context = module.this.context
113 | }
114 |
115 | resource "aws_msk_configuration" "config" {
116 | count = local.enabled ? 1 : 0
117 |
118 | kafka_versions = [var.kafka_version]
119 | name = join("-", [module.this.id, replace(var.kafka_version, ".", "-")])
120 | description = "Configuration for Amazon Managed Streaming for Kafka"
121 |
122 | server_properties = join("\n", [for k in keys(var.properties) : format("%s = %s", k, var.properties[k])])
123 |
124 | lifecycle {
125 | create_before_destroy = true
126 | }
127 | }
128 |
129 | resource "aws_msk_cluster" "default" {
130 | #bridgecrew:skip=BC_AWS_LOGGING_18:Skipping `Amazon MSK cluster logging is not enabled` check since it can be enabled with cloudwatch_logs_enabled = true
131 |   # Logging can alternatively be delivered to Kinesis Firehose or S3 via firehose_logs_enabled / s3_logs_enabled
132 | #bridgecrew:skip=BC_AWS_GENERAL_32:Skipping `MSK cluster encryption at rest and in transit is not enabled` check since it can be enabled with encryption_in_cluster = true
133 | count = local.enabled ? 1 : 0
134 |
135 | cluster_name = module.this.id
136 | kafka_version = var.kafka_version
137 | number_of_broker_nodes = var.broker_per_zone * length(var.subnet_ids)
138 | enhanced_monitoring = var.enhanced_monitoring
139 |
140 | broker_node_group_info {
141 | instance_type = var.broker_instance_type
142 | client_subnets = var.subnet_ids
143 | security_groups = var.create_security_group ? concat(var.associated_security_group_ids, [module.security_group.id]) : var.associated_security_group_ids
144 |
145 | storage_info {
146 | ebs_storage_info {
147 | volume_size = var.broker_volume_size
148 | }
149 | }
150 |
151 | connectivity_info {
152 | public_access {
153 | type = var.public_access_enabled ? "SERVICE_PROVIDED_EIPS" : "DISABLED"
154 | }
155 | }
156 | }
157 |
158 | configuration_info {
159 | arn = aws_msk_configuration.config[0].arn
160 | revision = aws_msk_configuration.config[0].latest_revision
161 | }
162 |
163 | encryption_info {
164 | encryption_in_transit {
165 | client_broker = var.client_broker
166 | in_cluster = var.encryption_in_cluster
167 | }
168 | encryption_at_rest_kms_key_arn = var.encryption_at_rest_kms_key_arn
169 | }
170 |
171 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster.html#client_authentication
172 | client_authentication {
173 | # Unauthenticated cannot be set to `false` without enabling any authentication mechanisms
174 | unauthenticated = var.client_allow_unauthenticated
175 |
176 | dynamic "tls" {
177 | for_each = var.client_tls_auth_enabled ? [1] : []
178 | content {
179 | certificate_authority_arns = var.certificate_authority_arns
180 | }
181 | }
182 |
183 | sasl {
184 | scram = var.client_sasl_scram_enabled
185 | iam = var.client_sasl_iam_enabled
186 | }
187 | }
188 |
189 | open_monitoring {
190 | prometheus {
191 | jmx_exporter {
192 | enabled_in_broker = var.jmx_exporter_enabled
193 | }
194 | node_exporter {
195 | enabled_in_broker = var.node_exporter_enabled
196 | }
197 | }
198 | }
199 |
200 | logging_info {
201 | broker_logs {
202 | cloudwatch_logs {
203 | enabled = var.cloudwatch_logs_enabled
204 | log_group = var.cloudwatch_logs_log_group
205 | }
206 | firehose {
207 | enabled = var.firehose_logs_enabled
208 | delivery_stream = var.firehose_delivery_stream
209 | }
210 | s3 {
211 | enabled = var.s3_logs_enabled
212 | bucket = var.s3_logs_bucket
213 | prefix = var.s3_logs_prefix
214 | }
215 | }
216 | }
217 |
218 | lifecycle {
219 | ignore_changes = [
220 | broker_node_group_info[0].storage_info[0].ebs_storage_info[0].provisioned_throughput
221 | ]
222 | }
223 |
224 | tags = module.this.tags
225 | }
226 |
227 | resource "aws_msk_scram_secret_association" "default" {
228 | count = local.enabled && var.client_sasl_scram_enabled && var.client_sasl_scram_secret_association_enabled ? 1 : 0
229 |
230 | cluster_arn = aws_msk_cluster.default[0].arn
231 | secret_arn_list = var.client_sasl_scram_secret_association_arns
232 | }
233 |
234 | module "hostname" {
235 | count = local.enabled && var.zone_id != null && var.zone_id != "" ? var.broker_dns_records_count : 0
236 |
237 | source = "cloudposse/route53-cluster-hostname/aws"
238 | version = "0.13.0"
239 |
240 | zone_id = var.zone_id
241 | dns_name = var.custom_broker_dns_name == null ? "${module.this.name}-broker-${count.index + 1}" : replace(var.custom_broker_dns_name, "%%ID%%", count.index + 1)
242 | records = local.enabled ? [local.broker_endpoints[count.index]] : []
243 |
244 | context = module.this.context
245 | }
246 |
247 | resource "aws_appautoscaling_target" "default" {
248 | count = local.enabled && var.autoscaling_enabled ? 1 : 0
249 |
250 | max_capacity = local.broker_volume_size_max
251 | min_capacity = 1
252 | resource_id = aws_msk_cluster.default[0].arn
253 | scalable_dimension = "kafka:broker-storage:VolumeSize"
254 | service_namespace = "kafka"
255 | }
256 |
257 | resource "aws_appautoscaling_policy" "default" {
258 | count = local.enabled && var.autoscaling_enabled ? 1 : 0
259 |
260 | name = "${aws_msk_cluster.default[0].cluster_name}-broker-scaling"
261 | policy_type = "TargetTrackingScaling"
262 | resource_id = aws_msk_cluster.default[0].arn
263 | scalable_dimension = one(aws_appautoscaling_target.default[*].scalable_dimension)
264 | service_namespace = one(aws_appautoscaling_target.default[*].service_namespace)
265 |
266 | target_tracking_scaling_policy_configuration {
267 | disable_scale_in = var.storage_autoscaling_disable_scale_in
268 |
269 | predefined_metric_specification {
270 | predefined_metric_type = "KafkaBrokerStorageUtilization"
271 | }
272 |
273 | target_value = var.storage_autoscaling_target_value
274 | }
275 | }
276 |
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_arn" {
2 | value = one(aws_msk_cluster.default[*].arn)
3 | description = "Amazon Resource Name (ARN) of the MSK cluster"
4 | }
5 |
6 | output "cluster_name" {
7 | value = one(aws_msk_cluster.default[*].cluster_name)
8 | description = "MSK Cluster name"
9 | }
10 |
11 | output "storage_mode" {
12 | value = one(aws_msk_cluster.default[*].storage_mode)
13 | description = "Storage mode for supported storage tiers"
14 | }
15 |
16 | output "bootstrap_brokers" {
17 | value = one(aws_msk_cluster.default[*].bootstrap_brokers)
18 | description = "Comma separated list of one or more hostname:port pairs of Kafka brokers suitable to bootstrap connectivity to the Kafka cluster"
19 | }
20 |
21 | output "bootstrap_brokers_tls" {
22 | value = one(aws_msk_cluster.default[*].bootstrap_brokers_tls)
23 | description = "Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for access to the Kafka cluster using TLS"
24 | }
25 |
26 | output "bootstrap_brokers_public_tls" {
27 | value = one(aws_msk_cluster.default[*].bootstrap_brokers_public_tls)
28 | description = "Comma separated list of one or more DNS names (or IP addresses) and TLS port pairs for public access to the Kafka cluster using TLS"
29 | }
30 |
31 | output "bootstrap_brokers_sasl_scram" {
32 | value = one(aws_msk_cluster.default[*].bootstrap_brokers_sasl_scram)
33 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for access to the Kafka cluster using SASL/SCRAM"
34 | }
35 |
36 | output "bootstrap_brokers_public_sasl_scram" {
37 | value = one(aws_msk_cluster.default[*].bootstrap_brokers_public_sasl_scram)
38 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL SCRAM port pairs for public access to the Kafka cluster using SASL/SCRAM"
39 | }
40 |
41 | output "bootstrap_brokers_sasl_iam" {
42 | value = one(aws_msk_cluster.default[*].bootstrap_brokers_sasl_iam)
43 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for access to the Kafka cluster using SASL/IAM"
44 | }
45 |
46 | output "bootstrap_brokers_public_sasl_iam" {
47 | value = one(aws_msk_cluster.default[*].bootstrap_brokers_public_sasl_iam)
48 | description = "Comma separated list of one or more DNS names (or IP addresses) and SASL IAM port pairs for public access to the Kafka cluster using SASL/IAM"
49 | }
50 |
51 | output "zookeeper_connect_string" {
52 | value = one(aws_msk_cluster.default[*].zookeeper_connect_string)
53 | description = "Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster"
54 | }
55 |
56 | output "zookeeper_connect_string_tls" {
57 | value = one(aws_msk_cluster.default[*].zookeeper_connect_string_tls)
58 | description = "Comma separated list of one or more hostname:port pairs to connect to the Apache Zookeeper cluster via TLS"
59 | }
60 |
61 | output "broker_endpoints" {
62 | value = local.broker_endpoints
63 | description = "List of broker endpoints"
64 | }
65 |
66 | output "current_version" {
67 | value = one(aws_msk_cluster.default[*].current_version)
68 | description = "Current version of the MSK Cluster"
69 | }
70 |
71 | output "config_arn" {
72 | value = one(aws_msk_configuration.config[*].arn)
73 | description = "Amazon Resource Name (ARN) of the MSK configuration"
74 | }
75 |
76 | output "latest_revision" {
77 | value = one(aws_msk_configuration.config[*].latest_revision)
78 | description = "Latest revision of the MSK configuration"
79 | }
80 |
81 | output "hostnames" {
82 | value = module.hostname[*].hostname
83 | description = "List of MSK Cluster broker DNS hostnames"
84 | }
85 |
86 | output "security_group_id" {
87 | value = module.security_group.id
88 | description = "The ID of the created security group"
89 | }
90 |
91 | output "security_group_arn" {
92 | value = module.security_group.arn
93 | description = "The ARN of the created security group"
94 | }
95 |
96 | output "security_group_name" {
97 |   value       = module.security_group.name
98 |   description = "The name of the created security group"
99 | }
100 |
--------------------------------------------------------------------------------
/security-group-variables.tf:
--------------------------------------------------------------------------------
1 | # security-group-variables Version: 3
2 | #
3 | # Copy this file from https://github.com/cloudposse/terraform-aws-security-group/blob/master/exports/security-group-variables.tf
4 | # and EDIT IT TO SUIT YOUR PROJECT. Update the version number above if you update this file from a later version.
5 | # Unlike null-label context.tf, this file cannot be automatically updated
6 | # because of the tight integration with the module using it.
7 | ##
8 | # Delete this top comment block, except for the first line (version number),
9 | # REMOVE COMMENTS below that are intended for the initial implementor and not maintainers or end users.
10 | #
11 | # This file provides the standard inputs that all Cloud Posse Open Source
12 | # Terraform modules that create AWS Security Groups should implement.
13 | # This file does NOT provide implementation of the inputs, as that
14 | # of course varies with each module.
15 | #
16 | # This file declares some standard outputs modules should create,
17 | # but the declarations should be moved to `outputs.tf` and of course
18 | # may need to be modified based on the module's use of security-group.
19 | #
20 |
21 |
22 | variable "create_security_group" {
23 | type = bool
24 | description = "Set `true` to create and configure a new security group. If false, `associated_security_group_ids` must be provided."
25 | default = true
26 | }
27 |
28 | variable "associated_security_group_ids" {
29 | type = list(string)
30 | description = <<-EOT
31 | A list of IDs of Security Groups to associate the created resource with, in addition to the created security group.
32 | These security groups will not be modified and, if `create_security_group` is `false`, must have rules providing the desired access.
33 | EOT
34 | default = []
35 | }
36 |
37 | ##
38 | ## allowed_* inputs are optional, because the same thing can be accomplished by
39 | ## providing `additional_security_group_rules`. However, if the rules this
40 | ## module creates are non-trivial (for example, opening ports based on
41 | ## feature settings, see https://github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster/blob/3fe23c402cc420799ae721186812482335f78d24/main.tf#L14-L53 )
42 | ## then it makes sense to include these.
43 | ## Reasons not to include some or all of these inputs include
44 | ## - too hard to implement
45 | ## - does not make sense (particularly the IPv6 inputs if the underlying resource does not yet support IPv6)
46 | ## - likely to confuse users
47 | ## - likely to invite count/for_each issues
48 | variable "allowed_security_group_ids" {
49 | type = list(string)
50 | description = <<-EOT
51 | A list of IDs of Security Groups to allow access to the security group created by this module.
52 | The length of this list must be known at "plan" time.
53 | EOT
54 | default = []
55 | }
56 |
57 | variable "allowed_cidr_blocks" {
58 | type = list(string)
59 | description = <<-EOT
60 | A list of IPv4 CIDRs to allow access to the security group created by this module.
61 | The length of this list must be known at "plan" time.
62 | EOT
63 | default = []
64 | }
65 | ## End of optional allowed_* ###########
66 |
67 | variable "security_group_name" {
68 | type = list(string)
69 | description = <<-EOT
70 | The name to assign to the created security group. Must be unique within the VPC.
71 | If not provided, will be derived from the `null-label.context` passed in.
72 | If `create_before_destroy` is true, will be used as a name prefix.
73 | EOT
74 | default = []
75 | }
76 |
77 | variable "security_group_description" {
78 | type = string
79 | description = <<-EOT
80 | The description to assign to the created Security Group.
81 | Warning: Changing the description causes the security group to be replaced.
82 | EOT
83 | default = "Managed by Terraform"
84 | }
85 |
86 | variable "security_group_create_before_destroy" {
87 | type = bool
88 | description = <<-EOT
89 | Set `true` to enable terraform `create_before_destroy` behavior on the created security group.
90 | We only recommend setting this `false` if you are importing an existing security group
91 | that you do not want replaced and therefore need full control over its name.
92 | Note that changing this value will always cause the security group to be replaced.
93 | EOT
94 | default = true
95 | }
96 |
97 | variable "preserve_security_group_id" {
98 | type = bool
99 | description = <<-EOT
100 | When `false` and `security_group_create_before_destroy` is `true`, changes to security group rules
101 | cause a new security group to be created with the new rules, and the existing security group is then
102 | replaced with the new one, eliminating any service interruption.
103 | When `true` or when changing the value (from `false` to `true` or from `true` to `false`),
104 | existing security group rules will be deleted before new ones are created, resulting in a service interruption,
105 | but preserving the security group itself.
106 | **NOTE:** Setting this to `true` does not guarantee the security group will never be replaced,
107 | it only keeps changes to the security group rules from triggering a replacement.
108 | See the [terraform-aws-security-group README](https://github.com/cloudposse/terraform-aws-security-group) for further discussion.
109 | EOT
110 | default = false
111 | }
112 |
113 | variable "security_group_create_timeout" {
114 | type = string
115 | description = "How long to wait for the security group to be created."
116 | default = "10m"
117 | }
118 |
119 | variable "security_group_delete_timeout" {
120 | type = string
121 | description = <<-EOT
122 | How long to retry on `DependencyViolation` errors during security group deletion from
123 | lingering ENIs left by certain AWS services such as Elastic Load Balancing.
124 | EOT
125 | default = "15m"
126 | }
127 |
128 | variable "allow_all_egress" {
129 | type = bool
130 | description = <<-EOT
131 | If `true`, the created security group will allow egress on all ports and protocols to all IP addresses.
132 | If this is false and no egress rules are otherwise specified, then no egress will be allowed.
133 | EOT
134 | default = true
135 | }
136 |
137 | variable "additional_security_group_rules" {
138 | type = list(any)
139 | description = <<-EOT
140 | A list of Security Group rule objects to add to the created security group, in addition to the ones
141 | this module normally creates. (To suppress the module's rules, set `create_security_group` to false
142 | and supply your own security group(s) via `associated_security_group_ids`.)
143 | The keys and values of the objects are fully compatible with the `aws_security_group_rule` resource, except
144 | for `security_group_id` which will be ignored, and the optional "key" which, if provided, must be unique and known at "plan" time.
145 | For more info see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule
146 | and https://github.com/cloudposse/terraform-aws-security-group.
147 | EOT
148 | default = []
149 | }
150 |
151 | #### We do not expose an `additional_security_group_rule_matrix` input for a few reasons:
152 | # - It is a convenience and ultimately provides no rules that cannot be provided via `additional_security_group_rules`
153 | # - It is complicated and can, in some situations, create problems for Terraform `for_each`
154 | # - It is difficult to document and easy to make mistakes using it
155 |
156 |
157 | ## vpc_id is required, but if needed for reasons other than the security group,
158 | ## it should be defined in the main `variables.tf` file, not here.
159 | variable "vpc_id" {
160 | type = string
161 | description = "The ID of the VPC where the Security Group will be created."
162 | }
163 |
164 |
165 | #
166 | #
167 | #### The variables below (but not the outputs) can be omitted if not needed, and may need their descriptions modified
168 | #
169 | #
170 |
171 | #############################################################################################
172 | ## Special note about inline_rules_enabled and revoke_rules_on_delete
173 | ##
174 | ## The security-group inputs inline_rules_enabled and revoke_rules_on_delete should not
175 | ## be exposed in other modules unless there is a strong reason for them to be used.
176 | ## We discourage the use of inline_rules_enabled and we rarely need or want
177 | ## revoke_rules_on_delete, so we do not want to clutter our interface with those inputs.
178 | ##
179 | ## If someone wants to enable either of those options, they have the option
180 | ## of creating a security group configured as they like
181 | ## and passing it in as the target security group.
182 | #############################################################################################
183 |
184 | variable "inline_rules_enabled" {
185 | type = bool
186 | description = <<-EOT
187 | NOT RECOMMENDED. Create rules "inline" instead of as separate `aws_security_group_rule` resources.
188 | See [#20046](https://github.com/hashicorp/terraform-provider-aws/issues/20046) for one of several issues with inline rules.
189 | See [this post](https://github.com/hashicorp/terraform-provider-aws/pull/9032#issuecomment-639545250) for details on the difference between inline rules and rule resources.
190 | EOT
191 | default = false
192 | }
193 |
--------------------------------------------------------------------------------
/test/.gitignore:
--------------------------------------------------------------------------------
1 | .test-harness
2 |
--------------------------------------------------------------------------------
/test/Makefile:
--------------------------------------------------------------------------------
1 | TEST_HARNESS ?= https://github.com/cloudposse/test-harness.git
2 | TEST_HARNESS_BRANCH ?= master
3 | TEST_HARNESS_PATH = $(realpath .test-harness)
4 | BATS_ARGS ?= --tap
5 | BATS_LOG ?= test.log
6 |
7 | # Define a macro to run the tests
8 | define RUN_TESTS
9 | @echo "Running tests in $(1)"
10 | @cd $(1) && bats $(BATS_ARGS) $(addsuffix .bats,$(addprefix $(TEST_HARNESS_PATH)/test/terraform/,$(TESTS)))
11 | endef
12 |
13 | default: all
14 |
15 | -include Makefile.*
16 |
17 | ## Provision the test-harness
18 | .test-harness:
19 | [ -d $@ ] || git clone --depth=1 -b $(TEST_HARNESS_BRANCH) $(TEST_HARNESS) $@
20 |
21 | ## Initialize the tests
22 | init: .test-harness
23 |
24 | ## Install all dependencies (OS specific)
25 | deps::
26 | @exit 0
27 |
28 | ## Clean up the test harness
29 | ## Clean up the test harness (guard against TEST_HARNESS_PATH resolving to /)
30 | clean:
31 | 	[ "$(TEST_HARNESS_PATH)" = "/" ] || rm -rf $(TEST_HARNESS_PATH)
31 |
32 | ## Run all tests
33 | all: module examples/complete
34 |
35 | ## Run basic sanity checks against the module itself
36 | module: export TESTS ?= installed lint get-modules module-pinning get-plugins provider-pinning validate terraform-docs input-descriptions output-descriptions
37 | module: deps
38 | $(call RUN_TESTS, ../)
39 |
40 | ## Run tests against example
41 | examples/complete: export TESTS ?= installed lint get-modules get-plugins validate
42 | examples/complete: deps
43 | $(call RUN_TESTS, ../$@)
44 |
--------------------------------------------------------------------------------
/test/Makefile.alpine:
--------------------------------------------------------------------------------
1 | ifneq (,$(wildcard /sbin/apk))
2 | ## Install all dependencies for alpine
3 | deps:: init
4 | @apk add --update terraform-docs@cloudposse json2hcl@cloudposse
5 | endif
6 |
--------------------------------------------------------------------------------
/test/src/.gitignore:
--------------------------------------------------------------------------------
1 | .gopath
2 | vendor/
3 |
--------------------------------------------------------------------------------
/test/src/Makefile:
--------------------------------------------------------------------------------
1 | export TF_CLI_ARGS_init ?= -get-plugins=true
2 | export TERRAFORM_VERSION ?= $(shell curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version' | cut -d. -f1-2)
3 |
4 | .DEFAULT_GOAL : all
5 | .PHONY: all
6 |
7 | ## Default target
8 | all: test
9 |
10 | .PHONY : init
11 | ## Initialize tests
12 | init:
13 | @exit 0
14 |
15 | .PHONY : test
16 | ## Run tests
17 | test: init
18 | go mod download
19 | go test -v -timeout 60m
20 |
21 | ## Run tests in docker container
22 | docker/test:
23 | docker run --name terratest --rm -it -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_SESSION_TOKEN -e GITHUB_TOKEN \
24 | -e PATH="/usr/local/terraform/$(TERRAFORM_VERSION)/bin:/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
25 | -v $(CURDIR)/../../:/module/ cloudposse/test-harness:latest -C /module/test/src test
26 |
27 | .PHONY : clean
28 | ## Clean up files
29 | clean:
30 | rm -rf ../../examples/complete/*.tfstate*
31 |
--------------------------------------------------------------------------------
/test/src/examples_complete_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "regexp"
5 | "strings"
6 | "testing"
7 |
8 | "github.com/gruntwork-io/terratest/modules/random"
9 | "github.com/gruntwork-io/terratest/modules/terraform"
10 | testStructure "github.com/gruntwork-io/terratest/modules/test-structure"
11 | "github.com/stretchr/testify/assert"
12 | "k8s.io/apimachinery/pkg/util/runtime"
13 | )
14 |
15 | // TestExamplesComplete provisions examples/complete with randomized
16 | // attributes and verifies the cluster and security group names.
17 | func TestExamplesComplete(t *testing.T) {
18 | 	t.Parallel()
19 | 	randID := strings.ToLower(random.UniqueId())
20 | 	attributes := []string{randID}
21 |
22 | 	rootFolder := "../../"
23 | 	terraformFolderRelativeToRoot := "examples/complete"
24 | 	varFiles := []string{"fixtures.us-east-2.tfvars"}
25 |
26 | 	tempTestFolder := testStructure.CopyTerraformFolderToTemp(t, rootFolder, terraformFolderRelativeToRoot)
27 |
28 | 	terraformOptions := &terraform.Options{
29 | 		// The path to where our Terraform code is located
30 | 		TerraformDir: tempTestFolder,
31 | 		Upgrade:      true,
32 | 		// Variables to pass to our Terraform code using -var-file options
33 | 		VarFiles: varFiles,
34 | 		Vars: map[string]interface{}{
35 | 			"attributes": attributes,
36 | 		},
37 | 	}
38 |
39 | 	// At the end of the test, run `terraform destroy` to clean up any resources that were created
40 | 	defer cleanup(t, terraformOptions, tempTestFolder)
41 |
42 | 	// If the Go runtime crashes, run `terraform destroy` to clean up any resources that were created
43 | 	defer runtime.HandleCrash(func(i interface{}) {
44 | 		cleanup(t, terraformOptions, tempTestFolder)
45 | 	})
46 |
47 | 	// This will run `terraform init` and `terraform apply` and fail the test if there are any errors
48 | 	terraform.InitAndApply(t, terraformOptions)
49 |
50 | 	// Run `terraform output` to get the value of an output variable
51 | 	outputClusterName := terraform.Output(t, terraformOptions, "cluster_name")
52 | 	// Verify we're getting back the outputs we expect
53 | 	assert.Equal(t, "eg-ue2-test-msk-test-"+randID, outputClusterName)
54 |
55 | 	// Run `terraform output` to get the value of an output variable
56 | 	outputSecurityGroupName := terraform.Output(t, terraformOptions, "security_group_name")
57 | 	// Verify we're getting back the outputs we expect
58 | 	assert.Contains(t, outputSecurityGroupName, "eg-ue2-test-msk-test-"+randID)
59 | }
58 |
59 | // TestExamplesCompleteDisabled applies examples/complete with `enabled = false`
60 | // and verifies that no resources are created.
61 | func TestExamplesCompleteDisabled(t *testing.T) {
62 | 	t.Parallel()
63 | 	randID := strings.ToLower(random.UniqueId())
64 | 	attributes := []string{randID}
65 |
66 | 	rootFolder := "../../"
67 | 	terraformFolderRelativeToRoot := "examples/complete"
68 | 	varFiles := []string{"fixtures.us-east-2.tfvars"}
69 |
70 | 	tempTestFolder := testStructure.CopyTerraformFolderToTemp(t, rootFolder, terraformFolderRelativeToRoot)
71 |
72 | 	terraformOptions := &terraform.Options{
73 | 		// The path to where our Terraform code is located
74 | 		TerraformDir: tempTestFolder,
75 | 		Upgrade:      true,
76 | 		// Variables to pass to our Terraform code using -var-file options
77 | 		VarFiles: varFiles,
78 | 		Vars: map[string]interface{}{
79 | 			"attributes": attributes,
80 | 			"enabled":    false,
81 | 		},
82 | 	}
83 |
84 | 	// At the end of the test, run `terraform destroy` to clean up any resources that were created
85 | 	defer cleanup(t, terraformOptions, tempTestFolder)
86 |
87 | 	// If the Go runtime crashes, run `terraform destroy` to clean up any resources that were created
88 | 	defer runtime.HandleCrash(func(i interface{}) {
89 | 		cleanup(t, terraformOptions, tempTestFolder)
90 | 	})
91 |
92 | 	// This will run `terraform init` and `terraform apply` and fail the test if there are any errors
93 | 	results := terraform.InitAndApply(t, terraformOptions)
94 |
95 | 	// Should complete successfully without creating or changing any resources.
96 | 	// Extract the "Resources:" section of the output to make the error message more readable.
97 | 	re := regexp.MustCompile(`Resources: [^.]+\.`)
98 | 	match := re.FindString(results)
99 | 	assert.Equal(t, "Resources: 0 added, 0 changed, 0 destroyed.", match, "Applying with enabled=false should not create any resources")
100 | }
94 |
--------------------------------------------------------------------------------
/test/src/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/cloudposse/terraform-aws-msk-apache-kafka-cluster
2 |
3 | go 1.20
4 |
5 | require (
6 | github.com/gruntwork-io/terratest v0.41.24
7 | github.com/stretchr/testify v1.8.1
8 | k8s.io/apimachinery v0.20.6
9 | )
10 |
11 | require (
12 | cloud.google.com/go v0.110.0 // indirect
13 | cloud.google.com/go/compute v1.19.1 // indirect
14 | cloud.google.com/go/compute/metadata v0.2.3 // indirect
15 | cloud.google.com/go/iam v0.13.0 // indirect
16 | cloud.google.com/go/storage v1.28.1 // indirect
17 | github.com/agext/levenshtein v1.2.3 // indirect
18 | github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
19 | github.com/aws/aws-sdk-go v1.44.122 // indirect
20 | github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
21 | github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
22 | github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
23 | github.com/davecgh/go-spew v1.1.1 // indirect
24 | github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
25 | github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect
26 | github.com/go-logr/logr v0.2.0 // indirect
27 | github.com/go-sql-driver/mysql v1.4.1 // indirect
28 | github.com/gogo/protobuf v1.3.2 // indirect
29 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
30 | github.com/golang/protobuf v1.5.3 // indirect
31 | github.com/google/go-cmp v0.5.9 // indirect
32 | github.com/google/gofuzz v1.1.0 // indirect
33 | github.com/google/uuid v1.3.0 // indirect
34 | github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
35 | github.com/googleapis/gax-go/v2 v2.7.1 // indirect
36 | github.com/googleapis/gnostic v0.4.1 // indirect
37 | github.com/gruntwork-io/go-commons v0.8.0 // indirect
38 | github.com/hashicorp/errwrap v1.0.0 // indirect
39 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
40 | github.com/hashicorp/go-getter v1.7.5 // indirect
41 | github.com/hashicorp/go-multierror v1.1.0 // indirect
42 | github.com/hashicorp/go-safetemp v1.0.0 // indirect
43 | github.com/hashicorp/go-version v1.6.0 // indirect
44 | github.com/hashicorp/hcl/v2 v2.9.1 // indirect
45 | github.com/hashicorp/terraform-json v0.13.0 // indirect
46 | github.com/imdario/mergo v0.3.11 // indirect
47 | github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect
48 | github.com/jmespath/go-jmespath v0.4.0 // indirect
49 | github.com/json-iterator/go v1.1.11 // indirect
50 | github.com/klauspost/compress v1.15.11 // indirect
51 | github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect
52 | github.com/mitchellh/go-homedir v1.1.0 // indirect
53 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect
54 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect
55 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
56 | github.com/modern-go/reflect2 v1.0.1 // indirect
57 | github.com/pmezard/go-difflib v1.0.0 // indirect
58 | github.com/pquerna/otp v1.2.0 // indirect
59 | github.com/russross/blackfriday/v2 v2.1.0 // indirect
60 | github.com/spf13/pflag v1.0.5 // indirect
61 | github.com/tmccombs/hcl2json v0.3.3 // indirect
62 | github.com/ulikunitz/xz v0.5.10 // indirect
63 | github.com/urfave/cli v1.22.2 // indirect
64 | github.com/zclconf/go-cty v1.9.1 // indirect
65 | go.opencensus.io v0.24.0 // indirect
66 | golang.org/x/crypto v0.17.0 // indirect
67 | golang.org/x/net v0.10.0 // indirect
68 | golang.org/x/oauth2 v0.7.0 // indirect
69 | golang.org/x/sys v0.15.0 // indirect
70 | golang.org/x/term v0.15.0 // indirect
71 | golang.org/x/text v0.14.0 // indirect
72 | golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
73 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
74 | google.golang.org/api v0.114.0 // indirect
75 | google.golang.org/appengine v1.6.7 // indirect
76 | google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
77 | google.golang.org/grpc v1.56.3 // indirect
78 | google.golang.org/protobuf v1.30.0 // indirect
79 | gopkg.in/inf.v0 v0.9.1 // indirect
80 | gopkg.in/yaml.v2 v2.4.0 // indirect
81 | gopkg.in/yaml.v3 v3.0.1 // indirect
82 | k8s.io/api v0.20.6 // indirect
83 | k8s.io/client-go v0.20.6 // indirect
84 | k8s.io/klog/v2 v2.4.0 // indirect
85 | k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
86 | sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
87 | sigs.k8s.io/yaml v1.2.0 // indirect
88 | )
89 |
--------------------------------------------------------------------------------
/test/src/utils.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "github.com/gruntwork-io/terratest/modules/terraform"
5 | "github.com/stretchr/testify/assert"
6 | "os"
7 | "testing"
8 | )
9 |
10 | // cleanup tears down the Terraform-managed resources for a test run and
11 | // deletes the temporary working copy of the example, asserting that the
12 | // folder removal succeeded.
13 | func cleanup(t *testing.T, terraformOptions *terraform.Options, tempTestFolder string) {
14 | 	terraform.Destroy(t, terraformOptions)
15 | 	assert.NoError(t, os.RemoveAll(tempTestFolder))
16 | }
15 |
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | variable "kafka_version" {
2 | type = string
3 | description = <<-EOT
4 | The desired Kafka software version.
5 | Refer to https://docs.aws.amazon.com/msk/latest/developerguide/supported-kafka-versions.html for more details
6 | EOT
7 | nullable = false
8 | }
9 |
10 | variable "broker_instance_type" {
11 | type = string
12 | description = "The instance type to use for the Kafka brokers"
13 | nullable = false
14 | }
15 |
16 | variable "broker_per_zone" {
17 | type = number
18 | default = 1
19 | description = "Number of Kafka brokers per zone"
20 | validation {
21 | condition = var.broker_per_zone > 0
22 | error_message = "The broker_per_zone value must be at least 1."
23 | }
24 | nullable = false
25 | }
26 |
27 | variable "broker_volume_size" {
28 | type = number
29 | default = 1000
30 | description = "The size in GiB of the EBS volume for the data drive on each broker node"
31 | nullable = false
32 | }
33 |
34 | variable "subnet_ids" {
35 | type = list(string)
36 | description = "Subnet IDs for Client Broker"
37 | validation {
38 | condition = length(var.subnet_ids) > 0
39 | error_message = "The subnet_ids list must have at least 1 value."
40 | }
41 | nullable = false
42 | }
43 |
44 | variable "zone_id" {
45 | type = string
46 | description = "Route53 DNS Zone ID for MSK broker hostnames"
47 | default = null
48 | }
49 |
50 | variable "broker_dns_records_count" {
51 | type = number
52 | description = <<-EOT
53 | This variable specifies how many DNS records to create for the broker endpoints in the DNS zone provided in the `zone_id` variable.
54 | This corresponds to the total number of broker endpoints created by the module.
55 | Calculate this number by multiplying the `broker_per_zone` variable by the subnet count.
56 | This variable is necessary to prevent the Terraform error:
57 | The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created.
58 | EOT
59 | default = 0
60 | nullable = false
61 | }
62 |
63 | variable "custom_broker_dns_name" {
64 | type = string
65 | description = "Custom Route53 DNS hostname for MSK brokers. Use `%%ID%%` key to specify brokers index in the hostname. Example: `kafka-broker%%ID%%.example.com`"
66 | default = null
67 | }
68 |
69 | variable "client_broker" {
70 | type = string
71 | default = "TLS"
72 | description = "Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT`"
73 | nullable = false
74 | }
75 |
76 | variable "encryption_in_cluster" {
77 | type = bool
78 | default = true
79 | description = "Whether data communication among broker nodes is encrypted"
80 | nullable = false
81 | }
82 |
83 | variable "encryption_at_rest_kms_key_arn" {
84 | type = string
85 | default = ""
86 | description = "You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest"
87 | }
88 |
89 | variable "enhanced_monitoring" {
90 | type = string
91 | default = "DEFAULT"
92 | description = "Specify the desired enhanced MSK CloudWatch monitoring level. Valid values: `DEFAULT`, `PER_BROKER`, and `PER_TOPIC_PER_BROKER`"
93 | nullable = false
94 | }
95 |
96 | variable "certificate_authority_arns" {
97 | type = list(string)
98 | default = []
99 | description = "List of ACM Certificate Authority Amazon Resource Names (ARNs) to be used for TLS client authentication"
100 | nullable = false
101 | }
102 |
103 | variable "client_allow_unauthenticated" {
104 | type = bool
105 | default = false
106 | description = "Enable unauthenticated access"
107 | nullable = false
108 | }
109 |
110 | variable "client_sasl_scram_enabled" {
111 | type = bool
112 | default = false
113 | description = "Enable SCRAM client authentication via AWS Secrets Manager. Cannot be set to `true` at the same time as `client_tls_auth_enabled`"
114 | nullable = false
115 | }
116 |
117 | variable "client_sasl_scram_secret_association_enabled" {
118 | type = bool
119 | default = true
120 | description = "Enable the list of AWS Secrets Manager secret ARNs for SCRAM authentication"
121 | nullable = false
122 | }
123 |
124 | variable "client_sasl_scram_secret_association_arns" {
125 | type = list(string)
126 | default = []
127 | description = "List of AWS Secrets Manager secret ARNs for SCRAM authentication"
128 | nullable = false
129 | }
130 |
131 | variable "client_sasl_iam_enabled" {
132 | type = bool
133 | default = false
134 | description = "Enable client authentication via IAM policies. Cannot be set to `true` at the same time as `client_tls_auth_enabled`"
135 | nullable = false
136 | }
137 |
138 | variable "client_tls_auth_enabled" {
139 | type = bool
140 | default = false
141 | description = "Set `true` to enable the Client TLS Authentication"
142 | nullable = false
143 | }
144 |
145 | variable "jmx_exporter_enabled" {
146 | type = bool
147 | default = false
148 | description = "Set `true` to enable the JMX Exporter"
149 | nullable = false
150 | }
151 |
152 | variable "node_exporter_enabled" {
153 | type = bool
154 | default = false
155 | description = "Set `true` to enable the Node Exporter"
156 | nullable = false
157 | }
158 |
159 | variable "cloudwatch_logs_enabled" {
160 | type = bool
161 | default = false
162 | description = "Indicates whether you want to enable or disable streaming broker logs to Cloudwatch Logs"
163 | nullable = false
164 | }
165 |
166 | variable "cloudwatch_logs_log_group" {
167 | type = string
168 | default = null
169 | description = "Name of the Cloudwatch Log Group to deliver logs to"
170 | }
171 |
172 | variable "firehose_logs_enabled" {
173 | type = bool
174 | default = false
175 | description = "Indicates whether you want to enable or disable streaming broker logs to Kinesis Data Firehose"
176 | nullable = false
177 | }
178 |
179 | variable "firehose_delivery_stream" {
180 | type = string
181 | default = ""
182 | description = "Name of the Kinesis Data Firehose delivery stream to deliver logs to"
183 | }
184 |
185 | variable "s3_logs_enabled" {
186 |   type        = bool
187 |   default     = false
188 |   description = "Indicates whether you want to enable or disable streaming broker logs to S3"
189 |   nullable    = false
190 | }
191 |
192 | variable "s3_logs_bucket" {
193 | type = string
194 | default = ""
195 | description = "Name of the S3 bucket to deliver logs to"
196 | }
197 |
198 | variable "s3_logs_prefix" {
199 | type = string
200 | default = ""
201 | description = "Prefix to append to the S3 folder name logs are delivered to"
202 | }
203 |
204 | variable "properties" {
205 | type = map(string)
206 | default = {}
207 | description = "Contents of the server.properties file. Supported properties are documented in the [MSK Developer Guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-configuration-properties.html)"
208 | nullable = false
209 | }
210 |
211 | variable "autoscaling_enabled" {
212 | type = bool
213 | default = true
214 | description = "To automatically expand your cluster's storage in response to increased usage, you can enable this. [More info](https://docs.aws.amazon.com/msk/latest/developerguide/msk-autoexpand.html)"
215 | nullable = false
216 | }
217 |
218 | variable "storage_autoscaling_target_value" {
219 | type = number
220 | default = 60
221 | description = "Percentage of storage used to trigger autoscaled storage increase"
222 | }
223 |
224 | variable "storage_autoscaling_max_capacity" {
225 | type = number
226 | default = null
227 | description = "Maximum size the autoscaling policy can scale storage. Defaults to `broker_volume_size`"
228 | }
229 |
230 | variable "storage_autoscaling_disable_scale_in" {
231 | type = bool
232 | default = false
233 | description = "If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource"
234 | nullable = false
235 | }
236 |
237 | variable "security_group_rule_description" {
238 | type = string
239 | default = "Allow inbound %s traffic"
240 | description = "The description to place on each security group rule. The %s will be replaced with the protocol name"
241 | nullable = false
242 | }
243 |
244 | variable "public_access_enabled" {
245 | type = bool
246 | default = false
247 | description = "Enable public access to MSK cluster (given that all of the requirements are met)"
248 | nullable = false
249 | }
250 |
--------------------------------------------------------------------------------
/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 1.0.0"
3 |
4 | required_providers {
5 | aws = {
6 | source = "hashicorp/aws"
7 | version = ">= 4.0"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------