├── .github ├── ISSUE_TEMPLATE │ ├── bug_report_form.yml │ ├── config.yml │ └── feature_request_form.yml ├── pull_request_template.md ├── release-drafter.yml └── workflows │ ├── auto-release.yml │ ├── docs.yml │ └── lint.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── _data.tf ├── _outputs.tf ├── _variables.tf ├── alb-internal.tf ├── alb-listener-certificates.tf ├── alb.tf ├── asg-scheduler.tf ├── asg.tf ├── cf-exports.tf ├── cf-exports.yml ├── cloudwatch-events-policy.tf ├── cloutwatch-alarms-alb.tf ├── cloutwatch-alarms-asg.tf ├── cloutwatch-alarms-ecs.tf ├── cloutwatch-alarms-efs.tf ├── ec2-launch-template.tf ├── ecs.tf ├── efs.tf ├── example ├── db.tf ├── ecs-app.tf ├── ecs-cluster.tf ├── provider.tf └── variables.tf ├── iam-codedeploy.tf ├── iam-ecs-service.tf ├── iam-ecs-task.tf ├── iam-ecs.tf ├── sg-alb-internal.tf ├── sg-alb.tf ├── sg-ecs-nodes.tf ├── userdata.tpl ├── versions.tf └── waf.tf /.github/ISSUE_TEMPLATE/bug_report_form.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a bug report 3 | labels: ["bug"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this bug report! 9 | - type: input 10 | id: contact 11 | attributes: 12 | label: Contact Details (optional) 13 | description: How can we get in touch with you if we need more info? 14 | placeholder: ex. email@example.com 15 | validations: 16 | required: false 17 | - type: textarea 18 | id: what-happened 19 | attributes: 20 | label: Description. What happened? 21 | description: Also tell us, what did you expect to happen? 22 | placeholder: Tell us what you see! 23 | value: "A bug happened!" 24 | validations: 25 | required: true 26 | - type: textarea 27 | id: steps-to-reproduce 28 | attributes: 29 | label: Steps to reproduce 30 | description: Describe the steps to reproduce. 31 | placeholder: 1. Step 1 ... 32 | 2. Step 2 ... 33 | 3. Step 3 ... 34 | validations: 35 | required: true 36 | - type: textarea 37 | id: expected-behavior 38 | attributes: 39 | label: Expected behavior 40 | description: Describe the expected behavior. 41 | placeholder: The expected behavior is ... 42 | validations: 43 | required: false 44 | - type: textarea 45 | id: actual-behavior 46 | attributes: 47 | label: Actual behavior 48 | description: Describe the actual behavior. 49 | placeholder: The actual behavior is ... 50 | validations: 51 | required: true 52 | - type: dropdown 53 | id: occurrence-frequency 54 | attributes: 55 | label: Occurrence 56 | description: How often this bug occurs? 57 | options: 58 | - Frequently 59 | - Sometimes 60 | - Rarely 61 | validations: 62 | required: true 63 | - type: textarea 64 | id: logs 65 | attributes: 66 | label: Relevant log output 67 | description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 
68 | render: shell 69 | - type: checkboxes 70 | id: terms 71 | attributes: 72 | label: Code of Conduct 73 | description: By submitting this issue, you agree to follow our [Code of Conduct](../blob/master/CODE_OF_CONDUCT.md) 74 | options: 75 | - label: I agree to follow this project's Code of Conduct 76 | required: true -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: DNX One Documentation 4 | url: https://docs.dnx.one 5 | about: At DNX we help your business build better solutions by upgrading how delivery is done, leaving behind manual processes and embracing an automated, cloud-native way of working. 6 | - name: About us 7 | url: https://dnx.solutions/about-us/ 8 | about: Information about DNX as a company. -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request_form.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Suggest an idea for this project 3 | labels: ["feature-request"] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this feature request! 9 | - type: input 10 | id: contact 11 | attributes: 12 | label: Contact Details (optional) 13 | description: How can we get in touch with you if we need more info? 14 | placeholder: ex. email@example.com 15 | validations: 16 | required: false 17 | - type: textarea 18 | id: summary 19 | attributes: 20 | label: Summary 21 | description: Describe the requested feature. 22 | validations: 23 | required: true 24 | - type: textarea 25 | id: motivation 26 | attributes: 27 | label: Motivation 28 | description: Describe the motivation behind this feature request. 29 | validations: 30 | required: true 31 | - type: textarea 32 | id: alternatives 33 | attributes: 34 | label: Alternatives 35 | description: Describe the alternatives you've considered. 36 | validations: 37 | required: false 38 | - type: textarea 39 | id: additional-information 40 | attributes: 41 | label: Additional Context 42 | - type: checkboxes 43 | id: terms 44 | attributes: 45 | label: Code of Conduct 46 | description: By submitting this issue, you agree to follow our [Code of Conduct](../blob/master/CODE_OF_CONDUCT.md) 47 | options: 48 | - label: I agree to follow this project's Code of Conduct 49 | required: true -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. If it fixes a bug or resolves a feature request, be sure to link to that issue. 3 | 4 | ## Types of changes 5 | 6 | What types of changes does your code introduce? 7 | _Put an `x` in the boxes that apply_ 8 | 9 | - [ ] Bugfix (non-breaking change which fixes an issue) 10 | - [ ] New feature (non-breaking change which adds functionality) 11 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 12 | - [ ] Documentation Update (if none of the other choices apply) 13 | 14 | ## Checklist 15 | 16 | _Put an `x` in the boxes that apply. You can also fill these out after creating the PR.
If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code._ 17 | 18 | - [ ] I have read the CONTRIBUTING.md doc. 19 | - [ ] I have added necessary documentation (if appropriate). 20 | - [ ] Any dependent changes have been merged and published in downstream modules. 21 | 22 | ## Further comments 23 | 24 | If this is a relatively large or complex change, kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc... -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: '$RESOLVED_VERSION' 2 | tag-template: '$RESOLVED_VERSION' 3 | version-template: '$MAJOR.$MINOR.$PATCH' 4 | version-resolver: 5 | major: 6 | labels: 7 | - 'major' 8 | minor: 9 | labels: 10 | - 'minor' 11 | - 'enhancement' 12 | patch: 13 | labels: 14 | - 'auto-update' 15 | - 'patch' 16 | - 'fix' 17 | - 'bugfix' 18 | - 'bug' 19 | - 'hotfix' 20 | default: 'minor' 21 | 22 | categories: 23 | - title: '🚀 Enhancements' 24 | labels: 25 | - 'enhancement' 26 | - 'patch' 27 | - title: '🐛 Bug Fixes' 28 | labels: 29 | - 'fix' 30 | - 'bugfix' 31 | - 'bug' 32 | - 'hotfix' 33 | - title: '🤖 Automatic Updates' 34 | labels: 35 | - 'auto-update' 36 | 37 | change-template: | 38 |
39 | $TITLE @$AUTHOR (#$NUMBER) 40 | $BODY 41 |
42 | template: | 43 | ## What’s Changed 44 | $CHANGES -------------------------------------------------------------------------------- /.github/workflows/auto-release.yml: -------------------------------------------------------------------------------- 1 | name: auto-release 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | # Get PR from merged commit to master 13 | - uses: actions-ecosystem/action-get-merged-pull-request@v1 14 | id: get-merged-pull-request 15 | with: 16 | github_token: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }} 17 | # Drafts your next Release notes as Pull Requests are merged into "master" 18 | - uses: release-drafter/release-drafter@v5 19 | with: 20 | publish: ${{ !contains(steps.get-merged-pull-request.outputs.labels, 'no-release') }} 21 | prerelease: false 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.PUBLIC_REPO_ACCESS_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Generate terraform docs 2 | 3 | on: [pull_request] 4 | jobs: 5 | docs: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v3 9 | with: 10 | ref: ${{ github.head_ref }} 11 | 12 | - name: Render terraform docs inside the README.md and push changes back to pushed branch 13 | uses: DNXLabs/terraform-docs@v1.0.0 14 | with: 15 | tf_docs_working_dir: . 16 | tf_docs_output_file: README.md 17 | tf_docs_output_method: inject 18 | tf_docs_git_push: 'true' 19 | tf_docs_git_commit_message: 'terraform-docs: automated update action' -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: [push] 4 | 5 | jobs: 6 | tflint: 7 | name: Lint 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@master 11 | - name: TFLint 12 | uses: docker://wata727/tflint 13 | 14 | fmt: 15 | name: Code Format 16 | runs-on: ubuntu-latest 17 | container: 18 | image: hashicorp/terraform:latest 19 | steps: 20 | - uses: actions/checkout@master 21 | - run: terraform fmt --recursive -check=true 22 | 23 | validate: 24 | name: Validate 25 | runs-on: ubuntu-latest 26 | container: 27 | image: hashicorp/terraform:latest 28 | steps: 29 | - uses: actions/checkout@master 30 | - name: Validate Code 31 | env: 32 | AWS_REGION: 'us-east-1' 33 | TF_WARN_OUTPUT_ERRORS: 1 34 | TF_VAR_vpc_id: 'vpc-123456' 35 | TF_VAR_subnets: '["subnet-12345a"]' 36 | TF_VAR_workers_ami_id: 'ami-123456' 37 | TF_VAR_cluster_name: 'test_cluster' 38 | run: | 39 | terraform init 40 | terraform validate 41 | - name: Validate Examples 42 | run: | 43 | for example in $(find examples -maxdepth 1 -mindepth 1 -type d); do 44 | cd $example 45 | terraform init 46 | terraform validate 47 | cd - 48 | done 49 | 50 | minimum: 51 | name: Minimum version check 52 | runs-on: ubuntu-latest 53 | container: 54 | image: hashicorp/terraform:0.13.0 55 | steps: 56 | - uses: actions/checkout@master 57 | - name: Validate Code 58 | env: 59 | AWS_REGION: 'us-east-1' 60 | TF_WARN_OUTPUT_ERRORS: 1 61 | run: | 62 | sed -i -e 's/>=/=/' -e 's/ \(\d\+\.\d\+\)"/ \1.0"/' versions.tf 63 | terraform init 64 | terraform validate -var "region=${AWS_REGION}" -var "vpc_id=vpc-123456" -var "subnets=[\"subnet-12345a\"]" -var "workers_ami_id=ami-123456" -var "cluster_ingress_cidrs=[]" -var "cluster_name=test_cluster" 
-------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | 3 | ### Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to make participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 11 | 12 | ### Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ### Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ### Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ### Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise, unacceptable behavior may be 58 | reported by contacting the project team at contact@dnx.solutions. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
-------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via issue, 4 | email, or any other method with the owners of this repository before making a change. 5 | 6 | Please note we have a code of conduct; please follow it in all your interactions with the project. 7 | 8 | ## Reporting Bugs 9 | 10 | This section guides you through submitting a bug report for DNX. Following these guidelines helps maintainers and the community understand your report, reproduce the behavior, and find related reports. 11 | 12 | Before creating a bug report, please check the existing issues, as you might find that you don't need to create one. When you are creating a bug report, please [include as many details as possible](#how-do-i-submit-a-good-bug-report). Fill out the required template; the information it asks for helps us resolve issues faster. 13 | 14 | 15 | #### How Do I Submit A (Good) Bug Report? 16 | 17 | Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). Create an issue on the repository and provide the following information by filling in the template. 18 | 19 | Explain the problem and include additional details to help maintainers reproduce the problem: 20 | 21 | * **Use a clear and descriptive title** for the issue to identify the problem. 22 | * **Describe the exact steps which reproduce the problem** in as much detail as possible. For example, start by explaining how you started the module, e.g. which command exactly you used in the terminal. 23 | * **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples (see the example snippet after these lists). If you're providing snippets in the issue, use [Markdown code blocks](https://docs.github.com/pt/github/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). 24 | * **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. 25 | * **If the problem wasn't triggered by a specific action**, describe what you were doing before the problem happened and share more information using the guidelines below. 26 | 27 | Provide more context by answering these questions: 28 | 29 | * **Did the problem start happening recently** (e.g. after updating to a new version of Terraform) or was this always a problem? 30 | * If the problem started happening recently, **can you reproduce the problem in an older version of Terraform?** What's the most recent version in which the problem doesn't happen? You can download older versions of Terraform from [the releases page](https://github.com/hashicorp/terraform/releases). 31 | * **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens and under which conditions it normally happens. 32 | * If the problem is related to working with files (e.g. opening and editing files), **does the problem happen for all files and projects or only some?** Does the problem happen only when working with local or remote files (e.g. on network drives), with files of a specific type (e.g. only JavaScript or Python files), with large files or files with very long lines, or with files in a specific encoding? Is there anything else special about the files you are using?
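For example, a report for this module might include the exact module block that triggered the problem together with the output of `terraform -v`. The snippet below is purely illustrative; the names, IDs and version ref are placeholders, not values from a real report:

```hcl
# Hypothetical excerpt from the configuration that reproduces the issue.
module "ecs_apps" {
  source = "git::https://github.com/DNXLabs/terraform-aws-ecs.git?ref=0.1.0"

  name               = "example-cluster"
  vpc_id             = "vpc-0123456789abcdef0"
  private_subnet_ids = ["subnet-aaa111", "subnet-bbb222"]
  public_subnet_ids  = ["subnet-ccc333", "subnet-ddd444"]
  secure_subnet_ids  = ["subnet-eee555", "subnet-fff666"]
  certificate_arn    = "arn:aws:acm:us-east-1:111111111111:certificate/example"

  # The option being reported against, for instance:
  alb_internal = true
}
```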
33 | ### Suggesting Enhancements 34 | 35 | This section guides you through submitting an enhancement suggestion for DNX modules, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion and find related suggestions. 36 | 37 | Before creating enhancement suggestions, please check issues as you might find out that you don't need to create one. When you are creating an enhancement suggestion, please include as many details as possible. Fill in the template, including the steps that you imagine you would take if the feature you're requesting existed. 38 | 39 | #### How Do I Submit A (Good) Enhancement Suggestion? 40 | 41 | Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com/features/issues/). Create an issue on that repository and provide the following information: 42 | 43 | * **Use a clear and descriptive title** for the issue to identify the suggestion. 44 | * **Provide a step-by-step description of the suggested enhancement** in as many details as possible. 45 | * **Provide specific examples to demonstrate the steps**. Include copy/pasteable snippets which you use in those examples, as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). 46 | * **Describe the current behavior** and **explain which behavior you expected to see instead** and why. 47 | * **Include screenshots and animated GIFs** which help you demonstrate the steps or point out the part of code which the suggestion is related. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://gitlab.gnome.org/Archive/byzanz) on Linux. 48 | * **Explain why this enhancement would be useful** to most Terraform users and isn't something that can or should be implemented as a community package. 49 | * **Specify which version of Terraform you're using.** You can get the exact version by running `terraform -v` in your terminal. 50 | * **Specify the name and version of the OS you're using.** 51 | 52 | 53 | ## Pull Request Process 54 | 55 | The process described here has several goals: 56 | 57 | - Fix problems that are important to users. 58 | 59 | 1. Update the README.md with details of changes to the interface, this includes new environment variables, exposed ports, useful file locations, and container parameters. 60 | 2. Increase the version numbers in any examples files and the README.md to the new version that this 61 | Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/). 62 | 3. You may merge the Pull Request in once you have the sign-off of two other developers, or if you do not have permission to do that, you may request the second reviewer to merge it for you. 63 | 4. Follow all instructions in [the template](./.github/pull_request_template.md). 64 | 5. Follow the [styleguides](https://docs.dnx.one/docs/style-guide/terraform-style-guide.html). 65 | 6. After you submit your pull request, verify that all [status checks](https://help.github.com/articles/about-status-checks/) are passing.
**What if the status checks are failing?** If a status check is failing, and you believe that the failure is unrelated to your change, please leave a comment on the pull request explaining why you believe the failure is unrelated. A maintainer will re-run the status check for you. If we conclude that the failure was a false positive, then we will open an issue to track that problem with our status check suite.
66 | 67 | While the prerequisites above must be satisfied prior to have your pull request reviewed, the reviewer(s) may ask you to complete additional design work, tests, or other changes before your pull request can be ultimately accepted. 68 | 69 | ### Your First Code Contribution 70 | 71 | Unsure where to begin contributing to DNX? You can start by looking through these `beginner` and `help-wanted` issues: 72 | 73 | * `beginner` - issues that should only require a few lines of code, and a test or two. 74 | * `help-wanted` - issues which should be a bit more involved than `beginner` issues. 75 | 76 | Both issue lists are sorted by the total number of comments. While not perfect, the number of comments is a reasonable proxy for the impact a given change will have. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-aws-ecs 2 | 3 | [![Lint Status](https://github.com/DNXLabs/terraform-aws-ecs/workflows/Lint/badge.svg)](https://github.com/DNXLabs/terraform-aws-ecs/actions) 4 | [![LICENSE](https://img.shields.io/github/license/DNXLabs/terraform-aws-ecs)](https://github.com/DNXLabs/terraform-aws-ecs/blob/master/LICENSE) 5 | 6 | This Terraform module builds an Elastic Container Service (ECS) cluster in AWS. 7 | 8 | The following resources will be created: 9 | - Elastic File System (EFS) 10 | - Auto Scaling group 11 | - CloudWatch alarms (Application Load Balancer, Auto Scaling, ECS and EFS) 12 | - S3 bucket to store Application Load Balancer access logs 13 | - Security groups (ALB, internal ALB, ECS nodes, RDS DB) 14 | - Web Application Firewall (WAF) 15 | - Instances for ECS workers 16 | - IAM roles and policies for the container instances 17 | 18 | In addition, you can optionally create: 19 | - Application Load Balancer (ALB) 20 | - alb - An external ALB 21 | - alb_internal - A second internal ALB for private APIs 22 | - alb_only - Only an Application Load Balancer (no CloudFront) with the cluster 23 | 24 | ## Usage 25 | 26 | ```hcl 27 | module "ecs_apps" { 28 | source = "git::https://github.com/DNXLabs/terraform-aws-ecs.git?ref=0.1.0" 29 | 30 | name = "${local.workspace["cluster_name"]}" 31 | instance_types = ["t3.large", "t2.large", "m2.xlarge"] 32 | vpc_id = "${data.aws_vpc.selected.id}" 33 | private_subnet_ids = ["${data.aws_subnet_ids.private.ids}"] 34 | public_subnet_ids = ["${data.aws_subnet_ids.public.ids}"] 35 | secure_subnet_ids = ["${data.aws_subnet_ids.secure.ids}"] 36 | certificate_arn = "${data.aws_acm_certificate.dnx_host.arn}" 37 | on_demand_percentage = 0 38 | asg_min = 1 39 | asg_max = 4 40 | asg_target_capacity = 50 41 | } 42 | ``` 43 | 44 | 45 | 46 | ## Requirements 47 | 48 | | Name | Version | 49 | |------|---------| 50 | | terraform | >= 0.13.0 | 51 | 52 | ## Providers 53 | 54 | | Name | Version | 55 | |------|---------| 56 | | aws | n/a | 57 | | random | n/a | 58 | | tls | n/a | 59 | 60 | ## Inputs 61 | 62 | | Name | Description | Type | Default | Required | 63 | |------|-------------|------|---------|:--------:| 64 | | alarm\_alb\_400\_errors\_threshold | Max threshold of HTTP 400 errors allowed in a 5 minutes interval (use 0 to disable this alarm). | `number` | `10` | no | 65 | | alarm\_alb\_500\_errors\_threshold | Max threshold of HTTP 500 errors allowed in a 5 minutes interval (use 0 to disable this alarm). | `number` | `10` | no | 66 | | alarm\_alb\_latency\_anomaly\_threshold | ALB Latency anomaly detection width (use 0 to disable this alarm). | `number` | `2` | no | 67 | | alarm\_asg\_high\_cpu\_threshold | Max threshold average CPU percentage allowed in a 2 minutes interval (use 0 to disable this alarm). | `number` | `80` | no | 68 | | alarm\_ecs\_high\_cpu\_threshold | Max threshold average CPU percentage allowed in a 2 minutes interval (use 0 to disable this alarm). | `number` | `80` | no | 69 | | alarm\_ecs\_high\_memory\_threshold | Max threshold average Memory percentage allowed in a 2 minutes interval (use 0 to disable this alarm).
| `number` | `80` | no | 70 | | alarm\_efs\_credits\_low\_threshold | Alerts when EFS credits fell below this number in bytes - default 1000000000000 is 1TB of a maximum of 2.31T of credits (use 0 to disable this alarm). | `number` | `1000000000000` | no | 71 | | alarm\_prefix | String prefix for cloudwatch alarms. (Optional) | `string` | `"alarm"` | no | 72 | | alarm\_sns\_topics | Alarm topics to create and alert on ECS instance metrics. | `list` | `[]` | no | 73 | | alb | Whether to deploy an ALB or not with the cluster. | `bool` | `true` | no | 74 | | alb\_drop\_invalid\_header\_fields | Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). | `bool` | `true` | no | 75 | | alb\_enable\_deletion\_protection | Enable deletion protection for ALBs | `bool` | `false` | no | 76 | | alb\_http\_listener | Whether to enable HTTP listeners | `bool` | `true` | no | 77 | | alb\_internal | Deploys a second internal ALB for private APIs. | `bool` | `false` | no | 78 | | alb\_internal\_ssl\_policy | The name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS. | `string` | `"ELBSecurityPolicy-TLS-1-2-Ext-2018-06"` | no | 79 | | alb\_only | Whether to deploy only an alb and no cloudFront or not with the cluster. | `bool` | `false` | no | 80 | | alb\_sg\_allow\_alb\_test\_listener | Whether to allow world access to the test listeners | `bool` | `true` | no | 81 | | alb\_sg\_allow\_egress\_https\_world | Whether to allow ALB to access HTTPS endpoints - needed when using OIDC authentication | `bool` | `true` | no | 82 | | alb\_ssl\_policy | The name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS. | `string` | `"ELBSecurityPolicy-2016-08"` | no | 83 | | alb\_test\_listener | Enables a second listener on ports 8080 and 8443 for a phased deploy/cutover (blue/green) | `bool` | `true` | no | 84 | | architecture | Architecture to select the AMI, x86\_64 or arm64 | `string` | `"x86_64"` | no | 85 | | asg\_capacity\_rebalance | Indicates whether capacity rebalance is enabled | `bool` | `false` | no | 86 | | asg\_max | Max number of instances for autoscaling group. | `number` | `4` | no | 87 | | asg\_min | Min number of instances for autoscaling group. | `number` | `1` | no | 88 | | asg\_protect\_from\_scale\_in | (Optional) Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no | 89 | | asg\_target\_capacity | Target average capacity percentage for the ECS capacity provider to track for autoscaling. | `number` | `70` | no | 90 | | autoscaling\_default\_cooldown | The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. | `number` | `300` | no | 91 | | autoscaling\_health\_check\_grace\_period | The length of time that Auto Scaling waits before checking an instance's health status. The grace period begins when an instance comes into service. | `number` | `300` | no | 92 | | backup | Assing a backup tag to efs resource - Backup will be performed by AWS Backup. | `string` | `"true"` | no | 93 | | certificate\_arn | n/a | `any` | n/a | yes | 94 | | certificate\_internal\_arn | certificate arn for internal ALB. | `string` | `""` | no | 95 | | code\_deploy | Enables CodeDeploy role to be used for deployment | `bool` | `true` | no | 96 | | container\_insights | Enables CloudWatch Container Insights for a cluster. 
| `bool` | `false` | no | 97 | | create\_efs | Enables creation of EFS volume for cluster | `bool` | `true` | no | 98 | | create\_iam\_service\_linked\_role | Create iam\_service\_linked\_role for ECS or not. | `bool` | `false` | no | 99 | | ebs\_key\_arn | ARN of a KMS Key to use on EBS volumes | `string` | `""` | no | 100 | | ec2\_key\_enabled | Generate a SSH private key and include in launch template of ECS nodes | `bool` | `false` | no | 101 | | efs\_key\_arn | ARN of a KMS Key to use on EFS volumes | `string` | `""` | no | 102 | | efs\_lifecycle\_transition\_to\_ia | Option to enable EFS Lifecycle Transaction to IA | `string` | `""` | no | 103 | | efs\_lifecycle\_transition\_to\_primary\_storage\_class | Option to enable EFS Lifecycle Transaction to Primary Storage Class | `bool` | `false` | no | 104 | | enable\_schedule | Enables schedule to shut down and start up instances outside business hours. | `bool` | `false` | no | 105 | | extra\_certificate\_arns | Extra ACM certificates to add to ALB Listeners | `list(string)` | `[]` | no | 106 | | extra\_task\_policies\_arn | Extra policies to add to the task definition permissions | `list(string)` | `[]` | no | 107 | | fargate\_only | Enable when cluster is only for fargate and does not require ASG/EC2/EFS infrastructure | `bool` | `false` | no | 108 | | idle\_timeout | IDLE time for ALB on seconds. | `number` | `400` | no | 109 | | instance\_types | Instance type for ECS workers | `list(any)` | `[]` | no | 110 | | instance\_volume\_size | Volume size for docker volume (in GB). | `number` | `30` | no | 111 | | instance\_volume\_size\_root | Volume size for root volume (in GB). | `number` | `16` | no | 112 | | lb\_access\_logs\_bucket | Bucket to store logs from lb access. | `string` | `""` | no | 113 | | lb\_access\_logs\_prefix | Bucket prefix to store lb access logs. | `string` | `""` | no | 114 | | name | Name of this ECS cluster. | `any` | n/a | yes | 115 | | on\_demand\_base\_capacity | You can designate a base portion of your total capacity as On-Demand. As the group scales, per your settings, the base portion is provisioned first, while additional On-Demand capacity is percentage-based. | `number` | `0` | no | 116 | | on\_demand\_percentage | Percentage of on-demand intances vs spot. | `number` | `100` | no | 117 | | private\_subnet\_ids | List of private subnet IDs for ECS instances and Internal ALB when enabled. | `list(string)` | n/a | yes | 118 | | provisioned\_throughput\_in\_mibps | The throughput, measured in MiB/s, that you want to provision for the file system. | `number` | `0` | no | 119 | | public\_subnet\_ids | List of public subnet IDs for ECS ALB. | `list(string)` | n/a | yes | 120 | | schedule\_cron\_start | Cron expression to define when to trigger a start of the auto-scaling group. E.g. '0 20 \* \* \*' to start at 8pm GMT time. | `string` | `""` | no | 121 | | schedule\_cron\_stop | Cron expression to define when to trigger a stop of the auto-scaling group. E.g. '0 10 \* \* \*' to stop at 10am GMT time. | `string` | `""` | no | 122 | | secure\_subnet\_ids | List of secure subnet IDs for EFS. | `list(string)` | n/a | yes | 123 | | security\_group\_ecs\_nodes\_outbound\_cidrs | ECS Nodes outbound allowed CIDRs for the security group. | `list(string)` |
`["0.0.0.0/0"]`
| no | 124 | | security\_group\_ids | Extra security groups for instances. | `list(string)` | `[]` | no | 125 | | tags | Map of tags that will be added to created resources. By default resources will be tagged with terraform=true. | `map(string)` | `{}` | no | 126 | | target\_group\_arns | List of target groups for ASG to register. | `list(string)` | `[]` | no | 127 | | throughput\_mode | Throughput mode for the file system. Defaults to bursting. Valid values: bursting, provisioned. | `string` | `"bursting"` | no | 128 | | userdata | Extra commands to pass to userdata. | `string` | `""` | no | 129 | | volume\_type | The EBS volume type | `string` | `"gp2"` | no | 130 | | vpc\_id | VPC ID to deploy the ECS cluster. | `any` | n/a | yes | 131 | | vpn\_cidr | Cidr of VPN to grant ssh access to ECS nodes | `list` |
`["10.37.0.0/16"]`
| no | 132 | | wafv2\_enable | Deploys WAF V2 with Managed rule groups | `bool` | `false` | no | 133 | | wafv2\_managed\_block\_rule\_groups | List of WAF V2 managed rule groups, set to block | `list(string)` | `[]` | no | 134 | | wafv2\_managed\_rule\_groups | List of WAF V2 managed rule groups, set to count | `list(string)` |
`["AWSManagedRulesCommonRuleSet"]`
| no | 135 | | wafv2\_rate\_limit\_rule | The limit on requests per 5-minute period for a single originating IP address (leave 0 to disable) | `number` | `0` | no | 136 | 137 | ## Outputs 138 | 139 | | Name | Description | 140 | |------|-------------| 141 | | alb\_arn | n/a | 142 | | alb\_dns\_name | n/a | 143 | | alb\_id | n/a | 144 | | alb\_internal\_arn | n/a | 145 | | alb\_internal\_dns\_name | n/a | 146 | | alb\_internal\_id | n/a | 147 | | alb\_internal\_listener\_https\_arn | n/a | 148 | | alb\_internal\_listener\_test\_traffic\_arn | n/a | 149 | | alb\_internal\_secgrp\_id | n/a | 150 | | alb\_internal\_zone\_id | n/a | 151 | | alb\_listener\_https\_arn | n/a | 152 | | alb\_listener\_test\_traffic\_arn | n/a | 153 | | alb\_secgrp\_id | n/a | 154 | | alb\_zone\_id | n/a | 155 | | ecs\_arn | n/a | 156 | | ecs\_codedeploy\_iam\_role\_arn | n/a | 157 | | ecs\_iam\_role\_arn | n/a | 158 | | ecs\_iam\_role\_name | n/a | 159 | | ecs\_id | n/a | 160 | | ecs\_name | n/a | 161 | | ecs\_nodes\_secgrp\_id | n/a | 162 | | ecs\_service\_iam\_role\_arn | n/a | 163 | | ecs\_service\_iam\_role\_name | n/a | 164 | | ecs\_task\_iam\_role\_arn | n/a | 165 | | ecs\_task\_iam\_role\_name | n/a | 166 | | efs\_fs\_id | n/a | 167 | | private\_key\_pem | n/a | 168 | 169 | 170 | 171 | ## WAF V2 Managed rule groups 172 | 173 | The official documentation with the list of groups and individual rules is available here: (https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-list.html). 174 | 175 | By default, only the Core rule set (a.k.a Common rules) is deployed with WAF, if you want to customise and add more managed groups to the Web ACL you can find the list of groups expected by Terraform following this developer guide: (https://docs.aws.amazon.com/waf/latest/developerguide/waf-using-managed-rule-groups.html). 176 | 177 | ## Authors 178 | 179 | Module managed by [DNX Solutions](https://github.com/DNXLabs). 180 | 181 | ## License 182 | 183 | Apache 2 Licensed. See [LICENSE](https://github.com/DNXLabs/terraform-aws-ecs/blob/master/LICENSE) for full details. 184 | -------------------------------------------------------------------------------- /_data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | 3 | data "aws_partition" "current" {} 4 | 5 | data "aws_ami" "amzn" { 6 | most_recent = true 7 | owners = ["amazon"] 8 | 9 | filter { 10 | name = "name" 11 | values = ["amzn2-ami-ecs-hvm*"] 12 | } 13 | 14 | filter { 15 | name = "architecture" 16 | values = [var.architecture] 17 | } 18 | 19 | name_regex = ".+-ebs$" 20 | } 21 | 22 | data "aws_subnet" "private_subnets" { 23 | count = length(var.private_subnet_ids) 24 | id = var.private_subnet_ids[count.index] 25 | } 26 | 27 | data "aws_caller_identity" "current" {} 28 | data "aws_iam_account_alias" "current" { 29 | count = var.alarm_prefix == "" ? 
1 : 0 30 | } 31 | 32 | #------- 33 | # KMS 34 | data "aws_kms_key" "ebs" { 35 | key_id = "alias/aws/ebs" 36 | } 37 | 38 | data "aws_kms_key" "efs" { 39 | key_id = "alias/aws/elasticfilesystem" 40 | } 41 | 42 | data "aws_ec2_managed_prefix_list" "s3" { 43 | name = "com.amazonaws.${data.aws_region.current.name}.s3" 44 | } 45 | -------------------------------------------------------------------------------- /_outputs.tf: -------------------------------------------------------------------------------- 1 | output "alb_id" { 2 | value = aws_lb.ecs.*.id 3 | } 4 | 5 | output "alb_arn" { 6 | value = aws_lb.ecs.*.arn 7 | } 8 | 9 | output "alb_dns_name" { 10 | value = aws_lb.ecs.*.dns_name 11 | } 12 | 13 | output "alb_zone_id" { 14 | value = aws_lb.ecs.*.zone_id 15 | } 16 | 17 | output "alb_internal_id" { 18 | value = aws_lb.ecs_internal.*.id 19 | } 20 | 21 | output "alb_internal_arn" { 22 | value = aws_lb.ecs_internal.*.arn 23 | } 24 | 25 | output "alb_internal_dns_name" { 26 | value = aws_lb.ecs_internal.*.dns_name 27 | } 28 | 29 | output "alb_internal_zone_id" { 30 | value = aws_lb.ecs_internal.*.zone_id 31 | } 32 | 33 | output "ecs_iam_role_arn" { 34 | value = try(aws_iam_role.ecs[0].arn, "") 35 | } 36 | 37 | output "ecs_iam_role_name" { 38 | value = try(aws_iam_role.ecs[0].name, "") 39 | } 40 | 41 | output "ecs_service_iam_role_arn" { 42 | value = aws_iam_role.ecs_service.arn 43 | } 44 | 45 | output "ecs_codedeploy_iam_role_arn" { 46 | value = try(aws_iam_role.codedeploy_service[0].arn, "") 47 | } 48 | 49 | output "ecs_service_iam_role_name" { 50 | value = aws_iam_role.ecs_service.name 51 | } 52 | 53 | output "ecs_task_iam_role_arn" { 54 | value = aws_iam_role.ecs_task.arn 55 | } 56 | 57 | output "ecs_task_iam_role_name" { 58 | value = aws_iam_role.ecs_task.name 59 | } 60 | 61 | output "ecs_id" { 62 | value = aws_ecs_cluster.ecs.id 63 | } 64 | 65 | output "ecs_arn" { 66 | value = aws_ecs_cluster.ecs.arn 67 | } 68 | 69 | output "ecs_name" { 70 | value = aws_ecs_cluster.ecs.name 71 | } 72 | 73 | output "alb_listener_https_arn" { 74 | value = try(aws_lb_listener.ecs_https[0].arn, "") 75 | } 76 | 77 | output "alb_listener_test_traffic_arn" { 78 | value = try(aws_lb_listener.ecs_test_https[0].arn, "") 79 | } 80 | 81 | output "alb_internal_listener_https_arn" { 82 | value = try(aws_lb_listener.ecs_https_internal[0].arn, "") 83 | } 84 | 85 | output "alb_internal_listener_test_traffic_arn" { 86 | value = try(aws_lb_listener.ecs_test_https_internal.*.arn, "") 87 | } 88 | 89 | output "ecs_nodes_secgrp_id" { 90 | value = aws_security_group.ecs_nodes.id 91 | } 92 | 93 | output "alb_secgrp_id" { 94 | value = aws_security_group.alb.*.id 95 | } 96 | 97 | output "alb_internal_secgrp_id" { 98 | value = try(aws_security_group.alb_internal[0].id, "") 99 | } 100 | 101 | output "efs_fs_id" { 102 | value = try(aws_efs_file_system.ecs[0].id, "") 103 | } 104 | 105 | output "private_key_pem" { 106 | value = try(tls_private_key.algorithm[0].private_key_pem, "") 107 | } 108 | -------------------------------------------------------------------------------- /_variables.tf: -------------------------------------------------------------------------------- 1 | # == REQUIRED VARS 2 | 3 | variable "name" { 4 | description = "Name of this ECS cluster." 
5 | } 6 | 7 | variable "instance_types" { 8 | description = "Instance type for ECS workers" 9 | type = list(any) 10 | default = [] 11 | } 12 | 13 | variable "architecture" { 14 | default = "x86_64" 15 | description = "Architecture to select the AMI, x86_64 or arm64" 16 | } 17 | variable "volume_type" { 18 | default = "gp2" 19 | description = "The EBS volume type" 20 | } 21 | 22 | variable "tags" { 23 | description = "Map of tags that will be added to created resources. By default resources will be tagged with terraform=true." 24 | type = map(string) 25 | default = {} 26 | } 27 | variable "on_demand_percentage" { 28 | description = "Percentage of on-demand intances vs spot." 29 | default = 100 30 | } 31 | 32 | variable "on_demand_base_capacity" { 33 | description = "You can designate a base portion of your total capacity as On-Demand. As the group scales, per your settings, the base portion is provisioned first, while additional On-Demand capacity is percentage-based." 34 | default = 0 35 | } 36 | 37 | variable "vpc_id" { 38 | description = "VPC ID to deploy the ECS cluster." 39 | } 40 | 41 | variable "private_subnet_ids" { 42 | type = list(string) 43 | description = "List of private subnet IDs for ECS instances and Internal ALB when enabled." 44 | } 45 | 46 | variable "public_subnet_ids" { 47 | type = list(string) 48 | description = "List of public subnet IDs for ECS ALB." 49 | } 50 | 51 | variable "secure_subnet_ids" { 52 | type = list(string) 53 | description = "List of secure subnet IDs for EFS." 54 | } 55 | 56 | variable "certificate_arn" {} 57 | 58 | variable "extra_certificate_arns" { 59 | type = list(string) 60 | description = "Extra ACM certificates to add to ALB Listeners" 61 | default = [] 62 | } 63 | 64 | # == OPTIONAL VARS 65 | 66 | variable "security_group_ids" { 67 | type = list(string) 68 | default = [] 69 | description = "Extra security groups for instances." 70 | } 71 | 72 | variable "security_group_ecs_nodes_outbound_cidrs" { 73 | type = list(string) 74 | default = ["0.0.0.0/0"] 75 | description = "ECS Nodes outbound allowed CIDRs for the security group." 76 | } 77 | 78 | variable "userdata" { 79 | default = "" 80 | description = "Extra commands to pass to userdata." 81 | } 82 | 83 | variable "alb" { 84 | default = true 85 | description = "Whether to deploy an ALB or not with the cluster." 86 | } 87 | 88 | variable "alb_http_listener" { 89 | default = true 90 | description = "Whether to enable HTTP listeners" 91 | } 92 | 93 | variable "alb_sg_allow_alb_test_listener" { 94 | default = true 95 | description = "Whether to allow world access to the test listeners" 96 | } 97 | 98 | variable "alb_sg_allow_egress_https_world" { 99 | default = true 100 | description = "Whether to allow ALB to access HTTPS endpoints - needed when using OIDC authentication" 101 | } 102 | 103 | variable "alb_only" { 104 | default = false 105 | description = "Whether to deploy only an alb and no cloudFront or not with the cluster." 106 | } 107 | 108 | variable "alb_internal" { 109 | default = false 110 | description = "Deploys a second internal ALB for private APIs." 111 | } 112 | 113 | variable "alb_enable_deletion_protection" { 114 | default = false 115 | description = "Enable deletion protection for ALBs" 116 | } 117 | 118 | variable "certificate_internal_arn" { 119 | default = "" 120 | description = "certificate arn for internal ALB." 
121 | } 122 | 123 | variable "alb_ssl_policy" { 124 | default = "ELBSecurityPolicy-2016-08" 125 | type = string 126 | description = "The name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS." 127 | } 128 | 129 | variable "alb_internal_ssl_policy" { 130 | default = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06" 131 | type = string 132 | description = "The name of the SSL Policy for the listener. Required if protocol is HTTPS or TLS." 133 | } 134 | 135 | variable "alb_drop_invalid_header_fields" { 136 | default = true 137 | type = bool 138 | description = "Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false)." 139 | } 140 | 141 | variable "asg_min" { 142 | default = 1 143 | description = "Min number of instances for autoscaling group." 144 | } 145 | 146 | variable "asg_max" { 147 | default = 4 148 | description = "Max number of instances for autoscaling group." 149 | } 150 | 151 | variable "asg_protect_from_scale_in" { 152 | default = false 153 | description = "(Optional) Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events." 154 | } 155 | 156 | variable "asg_target_capacity" { 157 | default = 70 158 | description = "Target average capacity percentage for the ECS capacity provider to track for autoscaling." 159 | } 160 | 161 | variable "alarm_sns_topics" { 162 | default = [] 163 | description = "Alarm topics to create and alert on ECS instance metrics." 164 | } 165 | 166 | variable "alarm_asg_high_cpu_threshold" { 167 | description = "Max threshold average CPU percentage allowed in a 2 minutes interval (use 0 to disable this alarm)." 168 | default = 80 169 | } 170 | 171 | variable "alarm_ecs_high_memory_threshold" { 172 | description = "Max threshold average Memory percentage allowed in a 2 minutes interval (use 0 to disable this alarm)." 173 | default = 80 174 | } 175 | 176 | variable "alarm_ecs_high_cpu_threshold" { 177 | description = "Max threshold average CPU percentage allowed in a 2 minutes interval (use 0 to disable this alarm)." 178 | default = 80 179 | } 180 | 181 | variable "alarm_alb_latency_anomaly_threshold" { 182 | description = "ALB Latency anomaly detection width (use 0 to disable this alarm)." 183 | default = 2 184 | } 185 | 186 | variable "alarm_alb_500_errors_threshold" { 187 | description = "Max threshold of HTTP 500 errors allowed in a 5 minutes interval (use 0 to disable this alarm)." 188 | default = 10 189 | } 190 | 191 | variable "alarm_alb_400_errors_threshold" { 192 | description = "Max threshold of HTTP 4000 errors allowed in a 5 minutes interval (use 0 to disable this alarm)." 193 | default = 10 194 | } 195 | 196 | variable "alarm_efs_credits_low_threshold" { 197 | description = "Alerts when EFS credits fell below this number in bytes - default 1000000000000 is 1TB of a maximum of 2.31T of credits (use 0 to disable this alarm)." 198 | default = 1000000000000 199 | } 200 | 201 | variable "target_group_arns" { 202 | default = [] 203 | type = list(string) 204 | description = "List of target groups for ASG to register." 205 | } 206 | 207 | variable "autoscaling_health_check_grace_period" { 208 | default = 300 209 | description = "The length of time that Auto Scaling waits before checking an instance's health status. The grace period begins when an instance comes into service." 
210 | } 211 | 212 | variable "autoscaling_default_cooldown" { 213 | default = 300 214 | description = "The amount of time, in seconds, after a scaling activity completes before another scaling activity can start." 215 | } 216 | 217 | variable "instance_volume_size" { 218 | description = "Volume size for the Docker volume (in GB)." 219 | default = 30 220 | } 221 | 222 | variable "instance_volume_size_root" { 223 | description = "Volume size for the root volume (in GB)." 224 | default = 16 225 | } 226 | 227 | variable "lb_access_logs_bucket" { 228 | type = string 229 | default = "" 230 | description = "S3 bucket to store ALB access logs." 231 | } 232 | 233 | variable "lb_access_logs_prefix" { 234 | type = string 235 | default = "" 236 | description = "Bucket prefix for ALB access logs." 237 | } 238 | 239 | variable "enable_schedule" { 240 | default = false 241 | description = "Enables schedule to shut down and start up instances outside business hours." 242 | } 243 | 244 | variable "schedule_cron_start" { 245 | type = string 246 | default = "" 247 | description = "Cron expression to define when to trigger a start of the auto-scaling group. E.g. '0 20 * * *' to start at 8pm GMT time." 248 | } 249 | 250 | variable "schedule_cron_stop" { 251 | type = string 252 | default = "" 253 | description = "Cron expression to define when to trigger a stop of the auto-scaling group. E.g. '0 10 * * *' to stop at 10am GMT time." 254 | } 255 | 256 | variable "backup" { 257 | type = string 258 | default = "true" 259 | description = "Assign a backup tag to the EFS resource - backup will be performed by AWS Backup." 260 | } 261 | 262 | variable "throughput_mode" { 263 | type = string 264 | default = "bursting" 265 | description = "Throughput mode for the file system. Defaults to bursting. Valid values: bursting, provisioned." 266 | } 267 | 268 | variable "provisioned_throughput_in_mibps" { 269 | default = 0 270 | description = "The throughput, measured in MiB/s, that you want to provision for the file system." 271 | } 272 | 273 | variable "alarm_prefix" { 274 | type = string 275 | description = "String prefix for CloudWatch alarms (optional)." 276 | default = "alarm" 277 | } 278 | 279 | variable "ebs_key_arn" { 280 | type = string 281 | description = "ARN of a KMS Key to use on EBS volumes" 282 | default = "" 283 | } 284 | 285 | variable "efs_key_arn" { 286 | type = string 287 | description = "ARN of a KMS Key to use on EFS volumes" 288 | default = "" 289 | } 290 | 291 | variable "wafv2_enable" { 292 | default = false 293 | description = "Deploys WAF V2 with Managed rule groups" 294 | } 295 | 296 | variable "wafv2_managed_rule_groups" { 297 | type = list(string) 298 | default = ["AWSManagedRulesCommonRuleSet"] 299 | description = "List of WAF V2 managed rule groups, set to count" 300 | } 301 | 302 | variable "wafv2_managed_block_rule_groups" { 303 | type = list(string) 304 | default = [] 305 | description = "List of WAF V2 managed rule groups, set to block" 306 | } 307 | 308 | variable "wafv2_rate_limit_rule" { 309 | type = number 310 | default = 0 311 | description = "The limit on requests per 5-minute period for a single originating IP address (leave 0 to disable)" 312 | } 313 | 314 | variable "create_iam_service_linked_role" { 315 | type = bool 316 | default = false 317 | description = "Create iam_service_linked_role for ECS or not."
318 | } 319 | 320 | variable "fargate_only" { 321 | default = false 322 | description = "Enable when the cluster is only for Fargate and does not require ASG/EC2/EFS infrastructure" 323 | } 324 | 325 | variable "ec2_key_enabled" { 326 | default = false 327 | description = "Generate an SSH private key and include it in the launch template of ECS nodes" 328 | } 329 | 330 | variable "vpn_cidr" { 331 | default = ["10.37.0.0/16"] 332 | description = "CIDR of the VPN to grant SSH access to ECS nodes" 333 | } 334 | 335 | variable "create_efs" { 336 | type = bool 337 | default = true 338 | description = "Enables creation of an EFS volume for the cluster" 339 | } 340 | 341 | variable "asg_capacity_rebalance" { 342 | type = bool 343 | default = false 344 | description = "Indicates whether capacity rebalance is enabled" 345 | } 346 | 347 | variable "efs_lifecycle_transition_to_ia" { 348 | type = string 349 | default = "" 350 | description = "Option to enable EFS Lifecycle Transition to IA" 351 | 352 | validation { 353 | condition = contains(["AFTER_7_DAYS", "AFTER_14_DAYS", "AFTER_30_DAYS", "AFTER_60_DAYS", "AFTER_90_DAYS", ""], var.efs_lifecycle_transition_to_ia) 354 | error_message = "Indicates how long it takes to transition files to the IA storage class. Valid values: AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS. Or leave empty if not used." 355 | } 356 | } 357 | 358 | variable "efs_lifecycle_transition_to_primary_storage_class" { 359 | type = bool 360 | default = false 361 | description = "Option to enable EFS Lifecycle Transition to Primary Storage Class" 362 | } 363 | 364 | variable "extra_task_policies_arn" { 365 | type = list(string) 366 | default = [] 367 | description = "Extra policies to add to the task definition permissions" 368 | } 369 | 370 | variable "container_insights" { 371 | type = bool 372 | default = false 373 | description = "Enables CloudWatch Container Insights for a cluster." 374 | } 375 | 376 | variable "alb_test_listener" { 377 | type = bool 378 | default = true 379 | description = "Enables a second listener on ports 8080 and 8443 for a phased deploy/cutover (blue/green)" 380 | } 381 | 382 | variable "code_deploy" { 383 | type = bool 384 | default = true 385 | description = "Enables CodeDeploy role to be used for deployment" 386 | } 387 | 388 | variable "idle_timeout" { 389 | type = number 390 | default = 400 391 | description = "Idle timeout for the ALB, in seconds." 392 | } -------------------------------------------------------------------------------- /alb-internal.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "ecs_internal" { 2 | count = var.alb_internal ? 1 : 0 3 | 4 | load_balancer_type = "application" 5 | internal = true 6 | name = "ecs-${var.name}-internal" 7 | subnets = var.private_subnet_ids 8 | drop_invalid_header_fields = var.alb_drop_invalid_header_fields 9 | enable_deletion_protection = var.alb_enable_deletion_protection 10 | 11 | security_groups = [ 12 | aws_security_group.alb_internal[0].id, 13 | ] 14 | 15 | idle_timeout = 400 16 | 17 | dynamic "access_logs" { 18 | for_each = compact([var.lb_access_logs_bucket]) 19 | 20 | content { 21 | bucket = var.lb_access_logs_bucket 22 | prefix = var.lb_access_logs_prefix 23 | enabled = true 24 | } 25 | } 26 | 27 | tags = merge( 28 | var.tags, 29 | { 30 | "Terraform" = true, 31 | Name = "ecs-${var.name}-internal" 32 | }, 33 | ) 34 | } 35 | 36 | resource "aws_lb_listener" "ecs_https_internal" { 37 | count = var.alb_internal ?
1 : 0 38 | 39 | load_balancer_arn = aws_lb.ecs_internal[0].arn 40 | port = "443" 41 | protocol = "HTTPS" 42 | ssl_policy = var.alb_internal_ssl_policy 43 | certificate_arn = var.certificate_internal_arn != "" ? var.certificate_internal_arn : var.certificate_arn 44 | 45 | default_action { 46 | type = "forward" 47 | target_group_arn = aws_lb_target_group.ecs_default_https_internal[0].arn 48 | } 49 | 50 | tags = merge( 51 | var.tags, 52 | { 53 | "Terraform" = true 54 | }, 55 | ) 56 | } 57 | 58 | resource "aws_lb_listener" "ecs_test_https_internal" { 59 | count = var.alb_internal && var.alb_test_listener ? 1 : 0 60 | 61 | load_balancer_arn = aws_lb.ecs_internal[0].arn 62 | port = "8443" 63 | protocol = "HTTPS" 64 | ssl_policy = var.alb_internal_ssl_policy 65 | certificate_arn = var.certificate_internal_arn != "" ? var.certificate_internal_arn : var.certificate_arn 66 | 67 | default_action { 68 | type = "forward" 69 | #target_group_arn = aws_lb_target_group.ecs_replacement_https[0].arn 70 | target_group_arn = aws_lb_target_group.ecs_default_https_internal[0].arn 71 | } 72 | tags = merge( 73 | var.tags, 74 | { 75 | "Terraform" = true 76 | }, 77 | ) 78 | } 79 | 80 | # Generate a random string to add it to the name of the Target Group 81 | resource "random_string" "alb_internal_prefix" { 82 | count = var.alb_internal ? 1 : 0 83 | length = 4 84 | upper = false 85 | special = false 86 | } 87 | 88 | resource "aws_lb_target_group" "ecs_default_https_internal" { 89 | count = var.alb_internal ? 1 : 0 90 | 91 | name = replace(substr("ecs-${var.name}-int-default-https-${random_string.alb_internal_prefix[0].result}", 0, 32), "/-+$/", "") 92 | port = 80 93 | protocol = "HTTP" 94 | vpc_id = var.vpc_id 95 | tags = merge( 96 | var.tags, 97 | { 98 | "Terraform" = true 99 | }, 100 | ) 101 | 102 | lifecycle { 103 | create_before_destroy = true 104 | } 105 | } 106 | 107 | 108 | 109 | -------------------------------------------------------------------------------- /alb-listener-certificates.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb_listener_certificate" "alb" { 2 | count = var.alb ? length(var.extra_certificate_arns) : 0 3 | listener_arn = element(aws_lb_listener.ecs_https.*.arn, 0) 4 | certificate_arn = var.extra_certificate_arns[count.index] 5 | } 6 | 7 | resource "aws_lb_listener_certificate" "alb_internal" { 8 | count = var.alb_internal ? length(var.extra_certificate_arns) : 0 9 | listener_arn = element(aws_lb_listener.ecs_https_internal.*.arn, 0) 10 | certificate_arn = var.extra_certificate_arns[count.index] 11 | } 12 | 13 | -------------------------------------------------------------------------------- /alb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "ecs" { 2 | count = var.alb ? 
1 : 0 3 | 4 | load_balancer_type = "application" 5 | internal = false 6 | name = "ecs-${var.name}" 7 | subnets = var.public_subnet_ids 8 | drop_invalid_header_fields = var.alb_drop_invalid_header_fields 9 | enable_deletion_protection = var.alb_enable_deletion_protection 10 | 11 | security_groups = [ 12 | aws_security_group.alb[0].id, 13 | ] 14 | 15 | idle_timeout = var.idle_timeout 16 | 17 | dynamic "access_logs" { 18 | for_each = compact([var.lb_access_logs_bucket]) 19 | 20 | content { 21 | bucket = var.lb_access_logs_bucket 22 | prefix = var.lb_access_logs_prefix 23 | enabled = true 24 | } 25 | } 26 | 27 | tags = merge( 28 | var.tags, 29 | { 30 | "Terraform" = true, 31 | "Name" = "ecs-${var.name}" 32 | }, 33 | ) 34 | } 35 | 36 | resource "aws_lb_listener" "ecs_https" { 37 | count = var.alb ? 1 : 0 38 | 39 | load_balancer_arn = aws_lb.ecs[0].arn 40 | port = "443" 41 | protocol = "HTTPS" 42 | ssl_policy = var.alb_ssl_policy 43 | certificate_arn = var.certificate_arn 44 | 45 | default_action { 46 | type = "forward" 47 | target_group_arn = aws_lb_target_group.ecs_default_https[0].arn 48 | } 49 | 50 | tags = merge( 51 | var.tags, 52 | { 53 | "Terraform" = true 54 | }, 55 | ) 56 | } 57 | 58 | resource "aws_lb_listener" "ecs_http_redirect" { 59 | count = var.alb && var.alb_http_listener ? 1 : 0 60 | 61 | load_balancer_arn = aws_lb.ecs[0].arn 62 | port = "80" 63 | protocol = "HTTP" 64 | 65 | default_action { 66 | type = "redirect" 67 | 68 | redirect { 69 | port = "443" 70 | protocol = "HTTPS" 71 | status_code = "HTTP_301" 72 | } 73 | } 74 | tags = merge( 75 | var.tags, 76 | { 77 | "Terraform" = true 78 | }, 79 | ) 80 | } 81 | 82 | resource "aws_lb_listener" "ecs_test_https" { 83 | count = var.alb && var.alb_test_listener ? 1 : 0 84 | 85 | load_balancer_arn = aws_lb.ecs[0].arn 86 | port = "8443" 87 | protocol = "HTTPS" 88 | ssl_policy = var.alb_ssl_policy 89 | certificate_arn = var.certificate_arn 90 | 91 | default_action { 92 | type = "forward" 93 | #target_group_arn = aws_lb_target_group.ecs_replacement_https[0].arn 94 | target_group_arn = aws_lb_target_group.ecs_default_https[0].arn 95 | } 96 | tags = merge( 97 | var.tags, 98 | { 99 | "Terraform" = true 100 | }, 101 | ) 102 | } 103 | 104 | resource "aws_lb_listener" "ecs_test_http_redirect" { 105 | count = var.alb && var.alb_http_listener && var.alb_test_listener ? 1 : 0 106 | 107 | load_balancer_arn = aws_lb.ecs[0].arn 108 | port = "8080" 109 | protocol = "HTTP" 110 | 111 | default_action { 112 | type = "redirect" 113 | 114 | redirect { 115 | port = "8443" 116 | protocol = "HTTPS" 117 | status_code = "HTTP_301" 118 | } 119 | } 120 | tags = merge( 121 | var.tags, 122 | { 123 | "Terraform" = true 124 | }, 125 | ) 126 | } 127 | 128 | # Generate a random string to add it to the name of the Target Group 129 | resource "random_string" "alb_prefix" { 130 | length = 4 131 | upper = false 132 | special = false 133 | } 134 | 135 | resource "aws_lb_target_group" "ecs_default_http" { 136 | count = var.alb && var.alb_http_listener ? 1 : 0 137 | 138 | name = replace(substr("ecs-${var.name}-default-http-${random_string.alb_prefix.result}", 0, 32), "/-+$/", "") 139 | port = 80 140 | protocol = "HTTP" 141 | vpc_id = var.vpc_id 142 | 143 | tags = merge( 144 | var.tags, 145 | { 146 | "Terraform" = true 147 | }, 148 | ) 149 | 150 | lifecycle { 151 | create_before_destroy = true 152 | } 153 | } 154 | 155 | resource "aws_lb_target_group" "ecs_default_https" { 156 | count = var.alb ? 
1 : 0 157 | 158 | name = replace(substr("ecs-${var.name}-default-https-${random_string.alb_prefix.result}", 0, 32), "/-+$/", "") 159 | port = 80 160 | protocol = "HTTP" 161 | vpc_id = var.vpc_id 162 | 163 | tags = merge( 164 | var.tags, 165 | { 166 | "Terraform" = true 167 | }, 168 | ) 169 | 170 | lifecycle { 171 | create_before_destroy = true 172 | } 173 | } 174 | 175 | 176 | 177 | -------------------------------------------------------------------------------- /asg-scheduler.tf: -------------------------------------------------------------------------------- 1 | resource "aws_autoscaling_schedule" "ecs_stop" { 2 | count = !var.fargate_only && var.enable_schedule ? 1 : 0 3 | scheduled_action_name = "ecs-${var.name}-stop" 4 | min_size = 0 5 | max_size = 0 6 | desired_capacity = 0 7 | autoscaling_group_name = aws_autoscaling_group.ecs[0].name 8 | recurrence = var.schedule_cron_stop 9 | } 10 | 11 | resource "aws_autoscaling_schedule" "ecs_start" { 12 | count = !var.fargate_only && var.enable_schedule ? 1 : 0 13 | scheduled_action_name = "ecs-${var.name}-start" 14 | min_size = var.asg_min 15 | max_size = var.asg_max 16 | desired_capacity = var.asg_min 17 | autoscaling_group_name = aws_autoscaling_group.ecs[0].name 18 | recurrence = var.schedule_cron_start 19 | } 20 | -------------------------------------------------------------------------------- /asg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_autoscaling_group" "ecs" { 2 | count = var.fargate_only ? 0 : 1 3 | name = "ecs-${var.name}" 4 | 5 | mixed_instances_policy { 6 | launch_template { 7 | launch_template_specification { 8 | launch_template_id = aws_launch_template.ecs[0].id 9 | version = "$Latest" 10 | } 11 | 12 | dynamic "override" { 13 | for_each = var.instance_types 14 | content { 15 | instance_type = override.value 16 | } 17 | } 18 | } 19 | 20 | instances_distribution { 21 | spot_instance_pools = 3 22 | on_demand_base_capacity = var.on_demand_base_capacity 23 | on_demand_percentage_above_base_capacity = var.on_demand_percentage 24 | } 25 | } 26 | 27 | vpc_zone_identifier = var.private_subnet_ids 28 | 29 | min_size = var.asg_min 30 | max_size = var.asg_max 31 | 32 | capacity_rebalance = var.asg_capacity_rebalance 33 | 34 | protect_from_scale_in = var.asg_protect_from_scale_in 35 | 36 | dynamic "tag" { 37 | for_each = merge( 38 | { 39 | Name = "ecs-node-${var.name}", 40 | AmazonECSManaged = "" 41 | }, 42 | var.tags 43 | ) 44 | content { 45 | key = tag.key 46 | value = tag.value 47 | propagate_at_launch = true 48 | } 49 | } 50 | 51 | target_group_arns = var.target_group_arns 52 | health_check_grace_period = var.autoscaling_health_check_grace_period 53 | default_cooldown = var.autoscaling_default_cooldown 54 | lifecycle { 55 | create_before_destroy = true 56 | } 57 | } 58 | 59 | resource "aws_ecs_capacity_provider" "ecs_capacity_provider" { 60 | count = var.fargate_only ? 
0 : 1 61 | name = "${var.name}-capacity-provider" 62 | 63 | auto_scaling_group_provider { 64 | auto_scaling_group_arn = aws_autoscaling_group.ecs[0].arn 65 | managed_termination_protection = "DISABLED" 66 | 67 | managed_scaling { 68 | maximum_scaling_step_size = 10 69 | minimum_scaling_step_size = 1 70 | status = "ENABLED" 71 | target_capacity = var.asg_target_capacity 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /cf-exports.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudformation_stack" "tf_exports" { 2 | name = "terraform-exports-ecs-${var.name}" 3 | 4 | template_body = templatefile("${path.module}/cf-exports.yml", { 5 | "name" = var.name, 6 | "vars" = { 7 | "AlbId" = length(aws_lb.ecs[*].id) > 0 ? element(aws_lb.ecs[*].id, 0) : "undefined", 8 | "AlbArn" = length(aws_lb.ecs[*].arn) > 0 ? element(aws_lb.ecs[*].arn, 0) : "undefined", 9 | "AlbDnsName" = length(aws_lb.ecs[*].dns_name) > 0 ? element(aws_lb.ecs[*].dns_name, 0) : "undefined", 10 | "AlbZoneId" = length(aws_lb.ecs[*].zone_id) > 0 ? element(aws_lb.ecs[*].zone_id, 0) : "undefined", 11 | "AlbSecgrpId" = length(aws_security_group.alb[*].id) > 0 ? element(aws_security_group.alb[*].id, 0) : "undefined", 12 | "EcsIamRoleArn" = try(aws_iam_role.ecs[0].arn, "undefined"), 13 | "EcsIamRoleName" = try(aws_iam_role.ecs[0].name, "undefined"), 14 | "EcsServiceIamRoleArn" = aws_iam_role.ecs_service.arn, 15 | "EcsServiceIamRoleName" = aws_iam_role.ecs_service.name, 16 | "EcsTaskIamRoleArn" = aws_iam_role.ecs_task.arn, 17 | "EcsTaskIamRoleName" = aws_iam_role.ecs_task.name, 18 | "EcsId" = aws_ecs_cluster.ecs.*.id[0], 19 | "EcsName" = aws_ecs_cluster.ecs.*.name[0], 20 | "EcsArn" = aws_ecs_cluster.ecs.*.arn[0], 21 | "AlbListenerHttpsArn" = length(aws_lb_listener.ecs_https[*].arn) > 0 ? 
element(aws_lb_listener.ecs_https[*].arn, 0) : "undefined", 22 | "EcsNodesSecGrpId" = aws_security_group.ecs_nodes.id, 23 | "VpcId" = var.vpc_id 24 | } 25 | }) 26 | } -------------------------------------------------------------------------------- /cf-exports.yml: -------------------------------------------------------------------------------- 1 | --- 2 | AWSTemplateFormatVersion: '2010-09-09' 3 | Description: 'CloudFormation exports' 4 | 5 | Conditions: 6 | HasNot: !Equals [ 'true', 'false' ] 7 | 8 | Resources: 9 | NullResource: 10 | Type: 'Custom::NullResource' 11 | Condition: HasNot 12 | 13 | Outputs: 14 | %{ for k, v in vars ~} 15 | ${k}: 16 | Value: "${v}" 17 | Export: 18 | Name: 'TfExport-Ecs-${name}-${k}' 19 | %{ endfor } 20 | -------------------------------------------------------------------------------- /cloudwatch-events-policy.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "ecs_events" { 2 | statement { 3 | actions = [ 4 | "logs:CreateLogStream", 5 | "logs:PutLogEvents", 6 | "logs:PutLogEventsBatch", 7 | ] 8 | 9 | resources = ["arn:aws:logs:${data.aws_region.current.name}:${data.aws_partition.current.partition}:log-group:/ecs/events/${var.name}/*"] 10 | 11 | principals { 12 | identifiers = ["events.amazonaws.com", "delivery.logs.amazonaws.com"] 13 | type = "Service" 14 | } 15 | } 16 | } 17 | 18 | resource "aws_cloudwatch_log_resource_policy" "ecs_events" { 19 | policy_document = data.aws_iam_policy_document.ecs_events.json 20 | policy_name = "capture-ecs-events-${var.name}" 21 | } 22 | -------------------------------------------------------------------------------- /cloutwatch-alarms-alb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "alb_500_errors" { 2 | count = var.alb && length(var.alarm_sns_topics) > 0 && var.alarm_alb_500_errors_threshold != 0 ? 1 : 0 3 | 4 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-alb-500-errors" 5 | comparison_operator = "GreaterThanOrEqualToThreshold" 6 | evaluation_periods = "2" 7 | metric_name = "HTTPCode_ELB_5XX_Count" 8 | namespace = "AWS/ApplicationELB" 9 | period = "300" 10 | statistic = "Maximum" 11 | threshold = var.alarm_alb_500_errors_threshold 12 | alarm_description = "Number of 500 errors at ALB above threshold" 13 | alarm_actions = var.alarm_sns_topics 14 | ok_actions = var.alarm_sns_topics 15 | insufficient_data_actions = [] 16 | treat_missing_data = "ignore" 17 | 18 | tags = merge( 19 | var.tags, 20 | { 21 | "Terraform" = true 22 | }, 23 | ) 24 | dimensions = { 25 | LoadBalancer = aws_lb.ecs[0].arn_suffix 26 | } 27 | } 28 | 29 | resource "aws_cloudwatch_metric_alarm" "alb_400_errors" { 30 | count = var.alb && length(var.alarm_sns_topics) > 0 && var.alarm_alb_400_errors_threshold != 0 ? 
1 : 0 31 | 32 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-alb-400-errors" 33 | comparison_operator = "GreaterThanOrEqualToThreshold" 34 | evaluation_periods = "2" 35 | metric_name = "HTTPCode_ELB_4XX_Count" 36 | namespace = "AWS/ApplicationELB" 37 | period = "300" 38 | statistic = "Maximum" 39 | threshold = var.alarm_alb_400_errors_threshold 40 | alarm_description = "Number of 400 errors at ALB above threshold" 41 | alarm_actions = var.alarm_sns_topics 42 | ok_actions = var.alarm_sns_topics 43 | insufficient_data_actions = [] 44 | treat_missing_data = "ignore" 45 | 46 | tags = merge( 47 | var.tags, 48 | { 49 | "Terraform" = true 50 | }, 51 | ) 52 | dimensions = { 53 | LoadBalancer = aws_lb.ecs[0].arn_suffix 54 | } 55 | } 56 | 57 | resource "aws_cloudwatch_metric_alarm" "alb_latency" { 58 | count = var.alb && length(var.alarm_sns_topics) > 0 && var.alarm_alb_latency_anomaly_threshold > 0 ? 1 : 0 59 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-alb-latency" 60 | comparison_operator = "GreaterThanUpperThreshold" 61 | evaluation_periods = "2" 62 | datapoints_to_alarm = "2" 63 | threshold_metric_id = "ad1" 64 | alarm_description = "Load balancer latency for application" 65 | alarm_actions = var.alarm_sns_topics 66 | ok_actions = var.alarm_sns_topics 67 | insufficient_data_actions = [] 68 | treat_missing_data = "ignore" 69 | 70 | metric_query { 71 | id = "ad1" 72 | expression = "ANOMALY_DETECTION_BAND(m1, ${var.alarm_alb_latency_anomaly_threshold})" 73 | label = "TargetResponseTime (Expected)" 74 | return_data = "true" 75 | } 76 | metric_query { 77 | id = "m1" 78 | return_data = "true" 79 | metric { 80 | metric_name = "TargetResponseTime" 81 | namespace = "AWS/ApplicationELB" 82 | period = "900" 83 | stat = "p90" 84 | 85 | dimensions = { 86 | LoadBalancer = aws_lb.ecs[0].arn_suffix 87 | } 88 | } 89 | } 90 | 91 | tags = merge( 92 | var.tags, 93 | { 94 | "Terraform" = true 95 | }, 96 | ) 97 | } 98 | -------------------------------------------------------------------------------- /cloutwatch-alarms-asg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "asg_high_cpu" { 2 | count = !var.fargate_only && length(var.alarm_sns_topics) > 0 && var.alarm_asg_high_cpu_threshold != 0 ? 1 : 0 3 | 4 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-asg-high-cpu" 5 | comparison_operator = "GreaterThanOrEqualToThreshold" 6 | evaluation_periods = "3" 7 | metric_name = "CPUUtilization" 8 | namespace = "AWS/EC2" 9 | period = "120" 10 | statistic = "Average" 11 | threshold = var.alarm_asg_high_cpu_threshold 12 | alarm_description = "ASG CPU above threshold" 13 | alarm_actions = var.alarm_sns_topics 14 | ok_actions = var.alarm_sns_topics 15 | insufficient_data_actions = [] 16 | treat_missing_data = "ignore" 17 | 18 | dimensions = { 19 | AutoScalingGroupName = aws_autoscaling_group.ecs[0].name 20 | } 21 | 22 | tags = merge( 23 | var.tags, 24 | { 25 | "Terraform" = true 26 | }, 27 | ) 28 | } 29 | -------------------------------------------------------------------------------- /cloutwatch-alarms-ecs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "ecs_high_memory" { 2 | count = !var.fargate_only && length(var.alarm_sns_topics) > 0 && var.alarm_ecs_high_memory_threshold != 0 ?
1 : 0 3 | 4 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-high-memory" 5 | comparison_operator = "GreaterThanOrEqualToThreshold" 6 | evaluation_periods = "3" 7 | metric_name = "MemoryUtilization" 8 | namespace = "AWS/ECS" 9 | period = "120" 10 | statistic = "Average" 11 | threshold = var.alarm_ecs_high_memory_threshold 12 | alarm_description = "Cluster memory above threshold" 13 | alarm_actions = var.alarm_sns_topics 14 | ok_actions = var.alarm_sns_topics 15 | insufficient_data_actions = [] 16 | treat_missing_data = "ignore" 17 | 18 | dimensions = { 19 | ClusterName = aws_ecs_cluster.ecs.name 20 | } 21 | 22 | tags = merge( 23 | var.tags, 24 | { 25 | "Terraform" = true 26 | }, 27 | ) 28 | } 29 | 30 | resource "aws_cloudwatch_metric_alarm" "ecs_high_cpu" { 31 | count = !var.fargate_only && length(var.alarm_sns_topics) > 0 && var.alarm_ecs_high_cpu_threshold != 0 ? 1 : 0 32 | 33 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-high-cpu" 34 | comparison_operator = "GreaterThanOrEqualToThreshold" 35 | evaluation_periods = "3" 36 | metric_name = "CPUUtilization" 37 | namespace = "AWS/ECS" 38 | period = "120" 39 | statistic = "Average" 40 | threshold = var.alarm_ecs_high_cpu_threshold 41 | alarm_description = "Cluster CPU above threshold" 42 | alarm_actions = var.alarm_sns_topics 43 | ok_actions = var.alarm_sns_topics 44 | insufficient_data_actions = [] 45 | treat_missing_data = "ignore" 46 | 47 | dimensions = { 48 | ClusterName = aws_ecs_cluster.ecs.name 49 | } 50 | 51 | tags = merge( 52 | var.tags, 53 | { 54 | "Terraform" = true 55 | }, 56 | ) 57 | } 58 | -------------------------------------------------------------------------------- /cloutwatch-alarms-efs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "efs_credits_low" { 2 | count = !var.fargate_only && length(var.alarm_sns_topics) > 0 && var.alarm_efs_credits_low_threshold != 0 ? 1 : 0 3 | 4 | alarm_name = "${try(data.aws_iam_account_alias.current[0].account_alias, var.alarm_prefix)}-ecs-${var.name}-efs-credits-low" 5 | comparison_operator = "LessThanOrEqualToThreshold" 6 | evaluation_periods = "3" 7 | metric_name = "BurstCreditBalance" 8 | namespace = "AWS/EFS" 9 | period = "120" 10 | statistic = "Average" 11 | threshold = var.alarm_efs_credits_low_threshold 12 | alarm_description = "EFS credits below threshold" 13 | alarm_actions = var.alarm_sns_topics 14 | ok_actions = var.alarm_sns_topics 15 | insufficient_data_actions = [] 16 | treat_missing_data = "ignore" 17 | 18 | dimensions = { 19 | FileSystemId = aws_efs_file_system.ecs[0].id 20 | } 21 | 22 | tags = merge( 23 | var.tags, 24 | { 25 | "Terraform" = true 26 | }, 27 | ) 28 | } 29 | -------------------------------------------------------------------------------- /ec2-launch-template.tf: -------------------------------------------------------------------------------- 1 | resource "aws_launch_template" "ecs" { 2 | count = var.fargate_only ? 0 : 1 3 | name_prefix = "ecs-${var.name}-" 4 | image_id = data.aws_ami.amzn.image_id 5 | instance_type = length(var.instance_types) == 0 ? 
"t2.micro" : var.instance_types[0] 6 | 7 | iam_instance_profile { 8 | name = aws_iam_instance_profile.ecs[0].name 9 | } 10 | 11 | block_device_mappings { 12 | device_name = "/dev/xvda" 13 | 14 | ebs { 15 | volume_size = var.instance_volume_size 16 | encrypted = true 17 | volume_type = var.volume_type 18 | kms_key_id = var.ebs_key_arn != "" ? var.ebs_key_arn : null 19 | } 20 | } 21 | 22 | vpc_security_group_ids = concat([aws_security_group.ecs_nodes.id], var.security_group_ids) 23 | 24 | user_data = base64encode(templatefile("${path.module}/userdata.tpl", { 25 | tf_cluster_name = var.name 26 | tf_efs_id = try(aws_efs_file_system.ecs[0].id, "") 27 | userdata_extra = var.userdata 28 | })) 29 | 30 | key_name = var.ec2_key_enabled ? aws_key_pair.generated_key[0].key_name : null 31 | 32 | lifecycle { 33 | create_before_destroy = true 34 | } 35 | 36 | tags = merge( 37 | var.tags, 38 | { 39 | "Terraform" = true 40 | }, 41 | ) 42 | } 43 | 44 | resource "tls_private_key" "algorithm" { 45 | count = var.ec2_key_enabled ? 1 : 0 46 | algorithm = "RSA" 47 | rsa_bits = 4096 48 | 49 | } 50 | 51 | resource "aws_key_pair" "generated_key" { 52 | count = var.ec2_key_enabled ? 1 : 0 53 | key_name = "${var.name}-key" 54 | public_key = tls_private_key.algorithm[0].public_key_openssh 55 | 56 | tags = merge( 57 | var.tags, 58 | { 59 | "Terraform" = true 60 | }, 61 | ) 62 | } 63 | -------------------------------------------------------------------------------- /ecs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ecs_cluster" "ecs" { 2 | name = var.name 3 | 4 | setting { 5 | name = "containerInsights" 6 | value = var.container_insights ? "enabled" : "disabled" 7 | } 8 | 9 | tags = merge( 10 | var.tags, 11 | { 12 | "Terraform" = true 13 | }, 14 | ) 15 | lifecycle { 16 | ignore_changes = [] 17 | 18 | } 19 | } 20 | 21 | resource "aws_ecs_cluster_capacity_providers" "ecs" { 22 | cluster_name = aws_ecs_cluster.ecs.name 23 | 24 | capacity_providers = compact([ 25 | try(aws_ecs_capacity_provider.ecs_capacity_provider[0].name, ""), 26 | "FARGATE", 27 | "FARGATE_SPOT" 28 | ]) 29 | } 30 | -------------------------------------------------------------------------------- /efs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_efs_file_system" "ecs" { 2 | count = var.create_efs ? 1 : 0 3 | creation_token = "ecs-${var.name}" 4 | encrypted = true 5 | kms_key_id = var.efs_key_arn != "" ? var.efs_key_arn : null 6 | 7 | throughput_mode = var.throughput_mode 8 | provisioned_throughput_in_mibps = var.provisioned_throughput_in_mibps 9 | 10 | dynamic "lifecycle_policy" { 11 | for_each = var.efs_lifecycle_transition_to_ia != "" ? [1] : [] 12 | content { 13 | transition_to_ia = var.efs_lifecycle_transition_to_ia 14 | } 15 | } 16 | 17 | dynamic "lifecycle_policy" { 18 | for_each = var.efs_lifecycle_transition_to_primary_storage_class ? [1] : [] 19 | content { 20 | transition_to_primary_storage_class = "AFTER_1_ACCESS" 21 | } 22 | } 23 | 24 | tags = merge( 25 | var.tags, 26 | { 27 | Name = "ecs-${var.name}" 28 | Backup = var.backup 29 | }, 30 | ) 31 | 32 | # lifecycle { 33 | # prevent_destroy = true 34 | # } 35 | } 36 | 37 | resource "aws_efs_mount_target" "ecs" { 38 | count = var.create_efs ? 
length(var.secure_subnet_ids) : 0 39 | file_system_id = aws_efs_file_system.ecs[0].id 40 | subnet_id = element(var.secure_subnet_ids, count.index) 41 | 42 | security_groups = [ 43 | aws_security_group.efs[0].id 44 | ] 45 | 46 | lifecycle { 47 | ignore_changes = [subnet_id] 48 | } 49 | } 50 | 51 | resource "aws_security_group" "efs" { 52 | count = var.create_efs ? 1 : 0 53 | name = "ecs-${var.name}-efs" 54 | description = "for EFS to talk to ECS cluster" 55 | vpc_id = var.vpc_id 56 | 57 | 58 | tags = merge( 59 | var.tags, 60 | { 61 | Name = "ecs-efs-${var.name}" 62 | Backup = var.backup 63 | }, 64 | ) 65 | } 66 | 67 | resource "aws_security_group_rule" "nfs_from_ecs_to_efs" { 68 | count = var.create_efs ? 1 : 0 69 | description = "ECS to EFS" 70 | type = "ingress" 71 | from_port = 2049 72 | to_port = 2049 73 | protocol = "tcp" 74 | security_group_id = aws_security_group.efs[0].id 75 | source_security_group_id = aws_security_group.ecs_nodes.id 76 | } 77 | -------------------------------------------------------------------------------- /example/db.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "rds_apps_password" { 2 | length = 34 3 | special = false 4 | } 5 | 6 | resource "aws_rds_cluster" "apps" { 7 | cluster_identifier = "${local.workspace["db_name"]}-cluster" 8 | engine = "aurora" 9 | engine_mode = "serverless" 10 | db_subnet_group_name = local.workspace["db_subnet"] 11 | vpc_security_group_ids = [aws_security_group.rds_apps.id] 12 | master_username = "master" 13 | master_password = random_string.rds_apps_password.result 14 | backup_retention_period = local.workspace["db_retention"] 15 | apply_immediately = true 16 | 17 | scaling_configuration { 18 | auto_pause = true 19 | max_capacity = 4 20 | min_capacity = 2 21 | seconds_until_auto_pause = 300 22 | } 23 | 24 | lifecycle { 25 | ignore_changes = ["master_password"] 26 | } 27 | } 28 | 29 | resource "aws_security_group" "rds_apps" { 30 | name = "rds-${local.workspace["cluster_name"]}" 31 | vpc_id = local.workspace["vpc_id"] 32 | 33 | lifecycle { 34 | create_before_destroy = true 35 | } 36 | 37 | ingress { 38 | from_port = 3306 39 | to_port = 3306 40 | protocol = "tcp" 41 | security_groups = [module.ecs_apps.ecs_nodes_secgrp_id] 42 | description = "From ECS Nodes" 43 | } 44 | } 45 | 46 | output "rds_host" { 47 | value = aws_rds_cluster.apps.endpoint 48 | } 49 | 50 | output "rds_creds" { 51 | value = "${aws_rds_cluster.apps.master_username}/${random_string.rds_apps_password.result}" 52 | } 53 | -------------------------------------------------------------------------------- /example/ecs-app.tf: -------------------------------------------------------------------------------- 1 | module "ecs_app_wordpress_01" { 2 | source = "git::https://github.com/DNXLabs/terraform-aws-ecs-app.git?ref=1.5.0" 3 | vpc_id = local.workspace["vpc_id"] 4 | cluster_name = module.ecs_apps.ecs_name 5 | service_role_arn = module.ecs_apps.ecs_service_iam_role_arn 6 | task_role_arn = module.ecs_apps.ecs_task_iam_role_arn 7 | alb_listener_https_arn = element(module.ecs_apps.alb_listener_https_arn, 0) 8 | alb_dns_name = element(module.ecs_apps.alb_dns_name, 0) 9 | name = "wordpress-01" 10 | image = "nginxdemos/hello:latest" 11 | container_port = 80 12 | hostname = "wp01.labs.dnx.host" # signed by cf_certificate_arn 13 | hostname_blue = "wp01-blue.labs.dnx.host" # signed by cf_certificate_arn 14 | hostname_origin = "wp01-origin.labs.dnx.host" # signed by alb_certificate_arn 15 | hosted_zone = "labs.dnx.host" 
16 | certificate_arn = local.workspace["cf_certificate_arn"] # goes on cloudfront 17 | 18 | # use these values for Wordpress 19 | healthcheck_path = "/readme.html" 20 | service_health_check_grace_period_seconds = 120 21 | } 22 | -------------------------------------------------------------------------------- /example/ecs-cluster.tf: -------------------------------------------------------------------------------- 1 | module "ecs_apps" { 2 | source = "git::https://github.com/DNXLabs/terraform-aws-ecs.git?ref=0.2.0" 3 | name = local.workspace["cluster_name"] 4 | instance_types = [ 5 | "t3.large", "t2.large", "m2.xlarge", 6 | ] 7 | vpc_id = local.workspace["vpc_id"] 8 | private_subnet_ids = split(",", local.workspace["private_subnet_ids"]) 9 | public_subnet_ids = split(",", local.workspace["public_subnet_ids"]) 10 | secure_subnet_ids = split(",", local.workspace["secure_subnet_ids"]) 11 | certificate_arn = local.workspace["alb_certificate_arn"] 12 | on_demand_percentage = 0 13 | } 14 | -------------------------------------------------------------------------------- /example/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "ap-southeast-2" 3 | } 4 | -------------------------------------------------------------------------------- /example/variables.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | env = { 3 | dev-apps = { 4 | cluster_name = "dev-apps" 5 | alb_certificate_arn = "arn:aws:acm:ap-southeast-2:REDACTED:certificate/REDACTED" 6 | cf_certificate_arn = "arn:aws:acm:us-east-1:REDACTED:certificate/REDACTED" 7 | vpc_id = "vpc-REDACTED" 8 | private_subnet_ids = "subnet-REDACTED,subnet-REDACTED" 9 | public_subnet_ids = "subnet-REDACTED,subnet-REDACTED" 10 | secure_subnet_ids = "subnet-REDACTED,subnet-REDACTED" 11 | db_subnet = "REDACTED" 12 | db_name = "dev-apps" 13 | db_retention = 5 14 | } 15 | 16 | prod-apps = { 17 | cluster_name = "prod-apps" 18 | alb_certificate_arn = "arn:aws:acm:ap-southeast-2:REDACTED:certificate/REDACTED" 19 | cf_certificate_arn = "arn:aws:acm:us-east-1:REDACTED:certificate/REDACTED" 20 | vpc_id = "vpc-REDACTED" 21 | private_subnet_ids = "subnet-REDACTED,subnet-REDACTED" 22 | public_subnet_ids = "subnet-REDACTED,subnet-REDACTED" 23 | secure_subnet_ids = "subnet-REDACTED,subnet-REDACTED" 24 | db_subnet = "REDACTED" 25 | db_name = "prod-apps" 26 | db_retention = 30 27 | } 28 | } 29 | 30 | workspace = local.env[terraform.workspace] 31 | } 32 | -------------------------------------------------------------------------------- /iam-codedeploy.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "codedeploy_service" { 2 | count = var.code_deploy ?
1 : 0 3 | name = "codedeploy-service-${var.name}-${data.aws_region.current.name}" 4 | tags = merge( 5 | var.tags, 6 | { 7 | "terraform" = "true" 8 | }, 9 | ) 10 | assume_role_policy = <> /etc/rc.local 10 | echo "ulimit -n 1048576" >> /etc/rc.local 11 | echo "vm.max_map_count=262144" >> /etc/sysctl.conf 12 | echo "fs.file-max=65536" >> /etc/sysctl.conf 13 | /sbin/sysctl -p /etc/sysctl.conf 14 | 15 | 16 | echo "### INSTALL PACKAGES" 17 | yum update -y 18 | yum install -y amazon-efs-utils aws-cli 19 | 20 | 21 | echo "### SETUP AGENT" 22 | 23 | echo "ECS_CLUSTER=${tf_cluster_name}" >> /etc/ecs/ecs.config 24 | echo "ECS_ENABLE_SPOT_INSTANCE_DRAINING=true" >> /etc/ecs/ecs.config 25 | 26 | 27 | echo "### SETUP EFS" 28 | EFS_DIR=/mnt/efs 29 | EFS_ID=${tf_efs_id} 30 | 31 | if [ -n "$EFS_ID" ]; then 32 | mkdir -p $${EFS_DIR} 33 | echo "$${EFS_ID}:/ $${EFS_DIR} efs tls,_netdev" >> /etc/fstab 34 | for i in $(seq 1 20); do mount -a -t efs defaults && break || sleep 60; done 35 | else 36 | echo "EFS_ID is empty. Skipping EFS setup." 37 | fi 38 | 39 | echo "### EXTRA USERDATA" 40 | ${userdata_extra} 41 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13.0" 3 | } -------------------------------------------------------------------------------- /waf.tf: -------------------------------------------------------------------------------- 1 | resource "aws_wafv2_web_acl" "waf_alb" { 2 | count = var.alb && var.wafv2_enable ? 1 : 0 3 | name = "waf-${var.name}-web-application" 4 | description = "WAF managed rules for web applications v2" 5 | scope = "REGIONAL" 6 | 7 | default_action { 8 | allow {} 9 | } 10 | 11 | dynamic "rule" { 12 | for_each = local.wafv2_rules 13 | 14 | content { 15 | name = "waf-${var.name}-${rule.value.type}-${rule.value.name}" 16 | priority = rule.key 17 | 18 | dynamic "override_action" { 19 | for_each = rule.value.type == "managed" ? [1] : [] 20 | content { 21 | count {} 22 | } 23 | } 24 | 25 | dynamic "action" { 26 | for_each = rule.value.type == "rate" ? [1] : [] 27 | content { 28 | block {} 29 | } 30 | } 31 | 32 | statement { 33 | dynamic "rate_based_statement" { 34 | for_each = rule.value.type == "rate" ? [1] : [] 35 | content { 36 | limit = rule.value.value 37 | aggregate_key_type = "IP" 38 | } 39 | } 40 | 41 | dynamic "managed_rule_group_statement" { 42 | for_each = rule.value.type == "managed" || rule.value.type == "managed_block" ? [1] : [] 43 | content { 44 | name = rule.value.name 45 | vendor_name = "AWS" 46 | } 47 | } 48 | } 49 | 50 | 51 | visibility_config { 52 | cloudwatch_metrics_enabled = true 53 | metric_name = "waf-${var.name}-${rule.value.type}-${rule.value.name}" 54 | sampled_requests_enabled = false 55 | } 56 | } 57 | } 58 | 59 | tags = merge( 60 | var.tags, 61 | { 62 | terraform = "true" 63 | Name = "waf-${var.name}-web-application" 64 | }, 65 | ) 66 | 67 | visibility_config { 68 | cloudwatch_metrics_enabled = true 69 | metric_name = "waf-${var.name}-general" 70 | sampled_requests_enabled = false 71 | } 72 | } 73 | 74 | locals { 75 | wafv2_managed_rule_groups = [for i, v in var.wafv2_managed_rule_groups : { "name" : v, "type" : "managed" }] 76 | wafv2_managed_block_rule_groups = [for i, v in var.wafv2_managed_block_rule_groups : { "name" : v, "type" : "managed_block" }] 77 | wafv2_rate_limit_rule = var.wafv2_rate_limit_rule == 0 ? 
[] : [{ 78 | "name" : "limit" 79 | "type" : "rate" 80 | "value" : var.wafv2_rate_limit_rule 81 | }] 82 | wafv2_rules = concat(local.wafv2_rate_limit_rule, local.wafv2_managed_block_rule_groups, local.wafv2_managed_rule_groups) 83 | } 84 | 85 | resource "aws_wafv2_web_acl_association" "waf_alb_association" { 86 | count = var.alb && var.wafv2_enable ? 1 : 0 87 | resource_arn = aws_lb.ecs[0].arn 88 | web_acl_arn = aws_wafv2_web_acl.waf_alb[0].arn 89 | } --------------------------------------------------------------------------------
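A minimal usage sketch follows, assuming only the inputs documented in _variables.tf above; it is not a file in this repository. The module ref, subnet IDs, certificate ARN and SNS topic ARN are placeholders to replace with real values, and the cron expressions follow the GMT examples given in the schedule_cron_start/schedule_cron_stop descriptions.

module "ecs_cluster" {
  source = "git::https://github.com/DNXLabs/terraform-aws-ecs.git?ref=<version>" # placeholder ref

  name               = "apps"
  vpc_id             = "vpc-xxxxxxxx"                                             # placeholder
  private_subnet_ids = ["subnet-aaaaaaaa", "subnet-bbbbbbbb"]                     # placeholders
  public_subnet_ids  = ["subnet-cccccccc", "subnet-dddddddd"]                     # placeholders
  secure_subnet_ids  = ["subnet-eeeeeeee", "subnet-ffffffff"]                     # placeholders
  certificate_arn    = "arn:aws:acm:ap-southeast-2:111111111111:certificate/xxxx" # placeholder

  instance_types       = ["t3.large", "t3.xlarge"]
  on_demand_percentage = 100

  # Optional: shut the ASG down outside business hours (GMT cron, see schedule_cron_* descriptions)
  enable_schedule     = true
  schedule_cron_stop  = "0 10 * * *"
  schedule_cron_start = "0 20 * * *"

  # Optional: WAF v2 in front of the public ALB, with rate limiting per source IP
  wafv2_enable          = true
  wafv2_rate_limit_rule = 2000

  # Optional: CloudWatch alarms delivered to an existing SNS topic
  alarm_sns_topics = ["arn:aws:sns:ap-southeast-2:111111111111:alerts"] # placeholder
}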