├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ ├── pr-title.yaml │ ├── stale.yaml │ └── static-analysis.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .tflint.hcl ├── .tfsec └── config.json ├── LICENSE ├── Makefile ├── README.md ├── alb.tf ├── cloudwatch_logs.tf ├── container_definition.tf ├── data.tf ├── docs ├── ECS Deployer.drawio ├── docs └── ecs_deployer.png ├── envoy.tf ├── examples ├── complete │ ├── README.md │ ├── data.tf │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── variables.tf │ └── versions.tf └── fixtures │ └── context │ ├── Dockerfile │ ├── index.html │ └── server.py ├── fluentbit.tf ├── iam.tf ├── main.tf ├── modules ├── deployment │ ├── backend.tf │ ├── code_build.tf │ ├── code_pipeline.tf │ ├── data.tf │ ├── iam_code_build.tf │ ├── iam_code_pipeline.tf │ ├── notification.tf │ ├── s3.tf │ ├── trigger.tf │ └── variables.tf └── ecr │ ├── backend.tf │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── otel.tf ├── outputs.tf ├── route53.tf ├── variables.tf └── versions.tf /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @moritzzimmer @thisismana @saefty 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yaml: -------------------------------------------------------------------------------- 1 | name: "pr title" 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | 10 | jobs: 11 | main: 12 | name: validate 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # 5.5.3 16 | env: 17 
| GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 18 | with: 19 | types: | 20 | fix 21 | feat 22 | docs 23 | ci 24 | chore 25 | refactor 26 | requireScope: false 27 | # Configure additional validation for the subject based on a regex. 28 | # This example ensures the subject doesn't start with an uppercase character. 29 | subjectPattern: ^(?![A-Z]).+$ 30 | # If `subjectPattern` is configured, you can use this property to override 31 | # the default error message that is shown when the pattern doesn't match. 32 | # The variables `subject` and `title` can be used within the message. 33 | subjectPatternError: | 34 | The subject "{subject}" found in the pull request title "{title}" 35 | didn't match the configured pattern. Please ensure that the subject 36 | doesn't start with an uppercase character. 37 | wip: true 38 | validateSingleCommit: false 39 | -------------------------------------------------------------------------------- /.github/workflows/stale.yaml: -------------------------------------------------------------------------------- 1 | name: 'stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v9 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | 14 | days-before-stale: 30 15 | stale-issue-label: stale 16 | stale-pr-label: stale 17 | stale-issue-message: | 18 | This issue has been automatically marked as stale because it has been open 30 days 19 | with no activity. Remove stale label or comment or this issue will be closed in 10 days 20 | stale-pr-message: | 21 | This PR has been automatically marked as stale because it has been open 30 days 22 | with no activity. 
Remove stale label or comment or this PR will be closed in 10 days 23 | exempt-all-milestones: true 24 | 25 | days-before-close: 10 26 | delete-branch: true 27 | close-issue-message: This issue was automatically closed because it was stale for 10 days 28 | close-pr-message: This PR was automatically closed because it was stale for 10 days 29 | -------------------------------------------------------------------------------- /.github/workflows/static-analysis.yaml: -------------------------------------------------------------------------------- 1 | name: "static analysis" 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | validate: 13 | name: validate 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | terraform: [ ~1.7 ] 18 | steps: 19 | - uses: actions/checkout@v4 20 | 21 | - uses: actions/cache@v4 22 | with: 23 | path: ~/.tflint.d/plugins 24 | key: ${{ matrix.os }}-tflint-${{ hashFiles('.tflint.hcl') }} 25 | 26 | - uses: hashicorp/setup-terraform@v3 27 | with: 28 | terraform_version: ${{ matrix.terraform }} 29 | 30 | - uses: terraform-linters/setup-tflint@90f302c255ef959cbfb4bd10581afecdb7ece3e6 # v4.1.1 31 | with: 32 | github_token: ${{ secrets.GITHUB_TOKEN }} 33 | 34 | - run: make fmt 35 | 36 | - run: make validate 37 | 38 | - run: make tflint 39 | 40 | - name: trivy config 41 | run: | 42 | cat >> ./trivy.yaml << EOF 43 | # see https://aquasecurity.github.io/trivy/latest/docs/references/configuration/config-file/ for reference 44 | exit-code: 1 45 | exit-on-eol: 1 46 | misconfiguration: 47 | terraform: 48 | exclude-downloaded-modules: true 49 | severity: 50 | - HIGH 51 | - CRITICAL 52 | scan: 53 | skip-dirs: 54 | - "**/.terraform/**/*" 55 | EOF 56 | 57 | cat ./trivy.yaml 58 | 59 | - uses: aquasecurity/trivy-action@0.30.0 60 | with: 61 | scan-type: 'config' 62 | hide-progress: false 63 | trivy-config: trivy.yaml 64 | -------------------------------------------------------------------------------- 
/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | .terraform.lock.hcl 8 | 9 | # Crash log files 10 | crash.log 11 | 12 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 13 | # .tfvars files are managed as part of configuration and so should be included in 14 | # version control. 15 | # 16 | # example.tfvars 17 | 18 | # Ignore override files as they are usually used to override resources locally and so 19 | # are not checked in 20 | override.tf 21 | override.tf.json 22 | *_override.tf 23 | *_override.tf.json 24 | 25 | # Include override files you do wish to add to version control using negated pattern 26 | # 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | .idea/ 33 | /.terraform.lock.hcl 34 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.99.1 4 | hooks: 5 | - id: terraform_fmt 6 | - id: terraform_validate 7 | args: 8 | - --init-args=-backend=false 9 | - id: terraform_tflint 10 | - id: terraform_trivy 11 | args: 12 | - --args=--tf-exclude-downloaded-modules 13 | - --args=--skip-dirs "**/.terraform/**/*" 14 | - --args=--severity=HIGH,CRITICAL 15 | - id: terraform_docs 16 | args: 17 | - '--args=--lockfile=false' 18 | - repo: https://github.com/pre-commit/pre-commit-hooks 19 | rev: v5.0.0 20 | hooks: 21 | - id: check-merge-conflict 22 | - id: trailing-whitespace 23 | - id: end-of-file-fixer 24 | -------------------------------------------------------------------------------- /.tflint.hcl: 
-------------------------------------------------------------------------------- 1 | config { 2 | call_module_type = "all" 3 | } 4 | 5 | plugin "aws" { 6 | enabled = true 7 | version = "0.40.0" 8 | source = "github.com/terraform-linters/tflint-ruleset-aws" 9 | } 10 | -------------------------------------------------------------------------------- /.tfsec/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "exclude": [ 3 | "aws-cloudwatch-log-group-customer-key", 4 | "aws-ecr-repository-customer-key" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DESCRIBE := $(shell git fetch --all > /dev/null && git describe --match "v*" --always --tags) 2 | DESCRIBE_PARTS := $(subst -, ,$(DESCRIBE)) 3 | # 'v0.2.0' 4 | VERSION_TAG := $(word 1,$(DESCRIBE_PARTS)) 5 | # '0.2.0' 6 | VERSION := $(subst v,,$(VERSION_TAG)) 7 | # '0 2 0' 8 | VERSION_PARTS := $(subst ., ,$(VERSION)) 9 | 10 | MAJOR := $(word 1,$(VERSION_PARTS)) 11 | MINOR := $(word 2,$(VERSION_PARTS)) 12 | PATCH := $(word 3,$(VERSION_PARTS)) 13 | 14 | BUMP ?= patch 15 | ifeq ($(BUMP), major) 16 | NEXT_VERSION := $(shell echo $$(($(MAJOR)+1)).0.0) 17 | else ifeq ($(BUMP), minor) 18 | NEXT_VERSION := $(shell echo $(MAJOR).$$(($(MINOR)+1)).0) 19 | else 20 | NEXT_VERSION := $(shell echo $(MAJOR).$(MINOR).$$(($(PATCH)+1))) 21 | endif 22 | NEXT_TAG := v$(NEXT_VERSION) 23 | 24 | STACKS = $(shell find . 
-not -path "*/\.*" -iname "*.tf" | sed -E "s|/[^/]+$$||" | sort --unique) 25 | ROOT_DIR := $(shell pwd) 26 | 27 | all: fmt validate tflint trivy 28 | 29 | init: ## Initialize a Terraform working directory 30 | @echo "+ $@" 31 | @terraform init -backend=false > /dev/null 32 | 33 | .PHONY: fmt 34 | fmt: ## Checks config files against canonical format 35 | @echo "+ $@" 36 | @terraform fmt -check=true -recursive 37 | 38 | .PHONY: validate 39 | validate: ## Validates the Terraform files 40 | @echo "+ $@" 41 | @for s in $(STACKS); do \ 42 | echo "validating $$s"; \ 43 | terraform -chdir=$$s init -backend=false > /dev/null; \ 44 | terraform -chdir=$$s validate || exit 1 ;\ 45 | done; 46 | 47 | .PHONY: tflint 48 | tflint: ## Runs tflint on all Terraform files 49 | @echo "+ $@" 50 | @tflint --init 51 | @for s in $(STACKS); do \ 52 | echo "tflint $$s"; \ 53 | terraform -chdir=$$s init -backend=false -lockfile=readonly > /dev/null; \ 54 | tflint --chdir=$$s --format=compact --config=$(ROOT_DIR)/.tflint.hcl || exit 1;\ 55 | done; 56 | 57 | trivy: ## Runs trivy on all Terraform files 58 | @echo "+ $@" 59 | @trivy config --exit-code 1 --severity HIGH --tf-exclude-downloaded-modules . 60 | 61 | bump :: 62 | @echo bumping version from $(VERSION_TAG) to $(NEXT_TAG) 63 | @sed -i '' s/$(VERSION)/$(NEXT_VERSION)/g README.md 64 | 65 | .PHONY: check-git-clean 66 | check-git-clean: 67 | @git diff-index --quiet HEAD || (echo "There are uncommitted changes"; exit 1) 68 | 69 | .PHONY: check-git-branch 70 | check-git-branch: check-git-clean 71 | git fetch --all --tags --prune 72 | git checkout main 73 | 74 | release: check-git-branch bump 75 | git add README.md 76 | git commit -vsam "Bump version to $(NEXT_TAG)" 77 | git tag -a $(NEXT_TAG) -m "$(NEXT_TAG)" 78 | git push origin $(NEXT_TAG) 79 | git push 80 | # create GH release if `gh cli` is installed and authenticated 81 | @if ! command -v gh >/dev/null 2>&1 ; then \ 82 | echo "gh CLI is not installed. 
Please create the release manually on GitHub." ; \ 83 | exit 0 ; \ 84 | fi; 85 | @if ! gh auth status >/dev/null 2>&1 ; then \ 86 | echo "gh CLI is not authenticated. Please run 'gh auth login' or create the release manually on GitHub." ; \ 87 | exit 0 ; \ 88 | fi; 89 | @gh release create $(NEXT_TAG) --generate-notes 90 | @echo "GitHub release created successfully for tag $(NEXT_TAG) at: https://github.com/stroeer/terraform-aws-ecs-fargate/releases/tag/$(NEXT_TAG)" 91 | 92 | help: ## Display this help screen 93 | @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AWS Fargate ECS Terraform Module 2 | 3 |  [](https://registry.terraform.io/modules/stroeer/ecs-fargate/aws/0.48.0)  [](https://opensource.org/licenses/Apache-2.0) 4 | 5 | Terraform module to create [Fargate ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html) resources on AWS. 
6 | 7 | ## Features 8 | 9 | * integration with AWS Cloud Map [service discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) 10 | * integration with [App Mesh](https://docs.aws.amazon.com/app-mesh/latest/userguide/what-is-app-mesh.html) including Envoy sidecar and IAM permission configuration 11 | * configuration of listener rules and target groups for [Application Load Balancers](#Load-Balancing) 12 | * [Elastic Container Registry](https://docs.aws.amazon.com/AmazonECR/latest/userguide/what-is-ecr.html) configuration like image scanning and lifecycle policies 13 | * [blue/green deployments](#Automated-service-deployment) using CodePipeline and CodeDeploy 14 | * configuration of [custom log routing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) using FireLens and Fluent Bit 15 | * CloudWatch log group and IAM permissions for storing container logs 16 | * [AWS Distro for OpenTelemetry](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/trace-data.html) sidecar and IAM permission configuration 17 | 18 | ## How do I use this module? 19 | 20 | ### with ALB integration 21 | 22 | see [example](examples/complete) for details 23 | 24 | ```terraform 25 | module "service" { 26 | source = "registry.terraform.io/stroeer/ecs-fargate/aws" 27 | 28 | cpu = 256 29 | cluster_id = "my-ecs-cluster-id" 30 | container_port = 8000 31 | create_ingress_security_group = false 32 | create_deployment_pipeline = false 33 | desired_count = 1 34 | ecr_force_delete = true 35 | memory = 512 36 | service_name = "my-service" 37 | vpc_id = module.vpc.vpc_id 38 | 39 | // add listener rules that determine how the load balancer routes requests to its registered targets. 
40 | https_listener_rules = [{ 41 | listener_arn = aws_lb_listener.http.arn 42 | 43 | actions = [{ 44 | type = "forward" 45 | target_group_index = 0 46 | }] 47 | 48 | conditions = [{ 49 | path_patterns = ["/"] 50 | }] 51 | }] 52 | 53 | // add a target group to route ALB traffic to this service 54 | target_groups = [ 55 | { 56 | name = "my-service" 57 | backend_protocol = "HTTP" 58 | backend_port = 8000 59 | load_balancer_arn = "my-lb-arn" 60 | target_type = "ip" 61 | 62 | health_check = { 63 | enabled = true 64 | path = "/" 65 | protocol = "HTTP" 66 | } 67 | } 68 | ] 69 | } 70 | ``` 71 | ### with autoscaling 72 | 73 | ```terraform 74 | module "service" { 75 | // see above 76 | 77 | appautoscaling_settings = { 78 | predefined_metric_type = "ECSServiceAverageCPUUtilization" 79 | target_value = 30 80 | max_capacity = 8 81 | min_capacity = 2 82 | disable_scale_in = true 83 | scale_in_cooldown = 120 84 | scale_out_cooldown = 15 85 | } 86 | } 87 | ``` 88 | 89 | Use this configuration map to enable and alter the autoscaling settings for this app. 90 | 91 | |key|description| 92 | |---|---| 93 | |`target_value`| (mandatory) the target value, refers to `predefined_metric_type` | 94 | |`predefined_metric_type`| see [docs for possible values](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_PredefinedMetricSpecification.html)| 95 | |`max_capacity`| upper threshold for scale out | 96 | |`min_capacity`| lower threshold for scale in | 97 | |`disable_scale_in`| prevent scale in if set to `true` | 98 | |`scale_in_cooldown`| delay (in seconds) between scale in events | 99 | |`scale_out_cooldown`| delay (in seconds) between scale out events | 100 | 101 | 102 | ### with blue/green deployments 103 | 104 | This module can create an automated deployment pipeline for your service (if `create_deployment_pipeline` is set to `true`). 
105 | 106 |  107 | 108 | #### details 109 | 110 | * you'll need AWS credentials that allows pushing images into the ECR container registry. 111 | * Once you push an image with `[tag=production]` - a Cloudwatch Event will trigger the start of a CodePipeline. This tag will only trigger the pipeline. In addition, you'll need the following tags: 112 | * `container.$CONTAINER_NAME` is required to locate the correct container from the 113 | service's [task-definition.json](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/create-task-definition.html) 114 | * another tag that will be unique and used for the actual deployment and the task-definition.json. A good choice would 115 | be `git.sha`. To be specific, we chose a tag that does not `start with container.` and is none 116 | of `["local", "production", "staging", "infrastructure"]` 117 | 118 | **That CodePipeline will do the heavy lifting (see deployment flow above)** 119 | 120 | 1. Pull the full `imagedefinitions.json` from the ECR registry 121 | 2. Trigger a CodeBuild to transform the `imagedefinitions.json` into a `imagedefinitions.json` for deployment 122 | 3. Update the ECS service's task-definition by replacing the specified `imageUri` for the given `name`. 123 | 124 | **Notifications** 125 | 126 | We will create a notification rule for the pipeline. You can provide your ARN of a notification rule target (e.g. a SNS 127 | topic ARN) using 128 | `codestar_notifications_target_arn`. Otherwise a new SNS topic with required permissions is created for every service. 129 | See 130 | [aws_codestarnotifications_notification_rule](https://www.terraform.io/docs/providers/aws/r/codestarnotifications_notification_rule.html) 131 | for details. 132 | 133 | You can then configure an integration between those notifications 134 | and [AWS Chatbot](https://docs.aws.amazon.com/dtconsole/latest/userguide/notifications-chatbot.html) 135 | for example. 
136 | 137 | **Optional shared pipeline resources** 138 | 139 | * A shared S3 bucket for storing artifacts from _CodePipeline_ can be used. You can specify it through the 140 | variable `code_pipeline_artifact_bucket`. Otherwise, a new bucket is created for every service. 141 | * A shared `IAM::Role` for _CodePipeline_ and _CodeBuild_ can be used. You can specify those through the 142 | variables `code_pipeline_role_name` and `code_build_role_name`. Otherwise, new roles are created for every service. For 143 | the permissions required see the [module code](./modules/deployment) 144 | 145 | ## Examples 146 | 147 | - [complete](examples/complete): complete example showcasing ALB integration, autoscaling and task definition configuration 148 | 149 | 150 | ## Requirements 151 | 152 | | Name | Version | 153 | |------|---------| 154 | | [terraform](#requirement\_terraform) | >= 1.3 | 155 | | [aws](#requirement\_aws) | >= 5.32 | 156 | 157 | ## Providers 158 | 159 | | Name | Version | 160 | |------|---------| 161 | | [aws](#provider\_aws) | >= 5.32 | 162 | 163 | ## Modules 164 | 165 | | Name | Source | Version | 166 | |------|--------|---------| 167 | | [code\_deploy](#module\_code\_deploy) | ./modules/deployment | n/a | 168 | | [container\_definition](#module\_container\_definition) | registry.terraform.io/cloudposse/config/yaml//modules/deepmerge | 1.0.2 | 169 | | [ecr](#module\_ecr) | ./modules/ecr | n/a | 170 | | [envoy\_container\_definition](#module\_envoy\_container\_definition) | registry.terraform.io/cloudposse/config/yaml//modules/deepmerge | 1.0.2 | 171 | | [fluentbit\_container\_definition](#module\_fluentbit\_container\_definition) | registry.terraform.io/cloudposse/config/yaml//modules/deepmerge | 1.0.2 | 172 | | [otel\_container\_definition](#module\_otel\_container\_definition) | registry.terraform.io/cloudposse/config/yaml//modules/deepmerge | 1.0.2 | 173 | | [sg](#module\_sg) | registry.terraform.io/terraform-aws-modules/security-group/aws | ~> 3.0 | 174 | 
175 | ## Resources 176 | 177 | | Name | Type | 178 | |------|------| 179 | | [aws_alb_listener_rule.public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/alb_listener_rule) | resource | 180 | | [aws_alb_target_group.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/alb_target_group) | resource | 181 | | [aws_appautoscaling_policy.ecs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/appautoscaling_policy) | resource | 182 | | [aws_appautoscaling_target.ecs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/appautoscaling_target) | resource | 183 | | [aws_cloudwatch_log_group.containers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | 184 | | [aws_ecs_service.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_service) | resource | 185 | | [aws_ecs_task_definition.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_task_definition) | resource | 186 | | [aws_iam_policy.acm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 187 | | [aws_iam_policy.cloudwatch_logs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 188 | | [aws_iam_policy.enable_execute_command](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 189 | | [aws_iam_policy.fluent_bit_config_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 190 | | [aws_iam_policy.otel](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 191 | | [aws_iam_role.ecs_task_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | 192 | | 
[aws_iam_role.task_execution_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | 193 | | [aws_iam_role_policy.ecs_task_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | 194 | | [aws_iam_role_policy_attachment.acm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 195 | | [aws_iam_role_policy_attachment.appmesh](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 196 | | [aws_iam_role_policy_attachment.cloudwatch_logs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 197 | | [aws_iam_role_policy_attachment.enable_execute_command](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 198 | | [aws_iam_role_policy_attachment.fluent_bit_config_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 199 | | [aws_iam_role_policy_attachment.otel](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 200 | | [aws_security_group_rule.trusted_egress_attachment](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | 201 | | [aws_service_discovery_service.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/service_discovery_service) | resource | 202 | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | 203 | | [aws_ecs_task_definition.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecs_task_definition) | data source | 204 | | 
[aws_iam_policy.appmesh](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | 205 | | [aws_iam_policy.ecs_task_execution_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | 206 | | [aws_iam_policy_document.acm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 207 | | [aws_iam_policy_document.cloudwatch_logs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 208 | | [aws_iam_policy_document.ecs_task_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 209 | | [aws_iam_policy_document.enable_execute_command](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 210 | | [aws_iam_policy_document.fluent_bit_config_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 211 | | [aws_iam_policy_document.otel](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 212 | | [aws_iam_policy_document.task_execution_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 213 | | [aws_lb.public](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lb) | data source | 214 | | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | 215 | | [aws_subnets.selected](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | 216 | 217 | ## Inputs 218 | 219 | | Name | Description | Type | Default | Required | 220 | |------|-------------|------|---------|:--------:| 
221 | | [additional\_container\_definitions](#input\_additional\_container\_definitions) | Additional container definitions added to the task definition of this service, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html for allowed parameters. | `list(any)` | `[]` | no | 222 | | [app\_mesh](#input\_app\_mesh) | Configuration of optional AWS App Mesh integration using an Envoy sidecar. |
object({| `{}` | no | 223 | | [appautoscaling\_settings](#input\_appautoscaling\_settings) | Autoscaling configuration for this service. | `map(any)` | `null` | no | 224 | | [assign\_public\_ip](#input\_assign\_public\_ip) | Assign a public IP address to the ENI of this service. | `bool` | `false` | no | 225 | | [capacity\_provider\_strategy](#input\_capacity\_provider\_strategy) | Capacity provider strategies to use for the service. Can be one or more. |
container_definition = optional(any, {})
container_name = optional(string, "envoy")
enabled = optional(bool, false)
mesh_name = optional(string, "apps")
tls = optional(object({
acm_certificate_arn = optional(string)
root_ca_arn = optional(string)
}), {})
})
list(object({| `null` | no | 226 | | [cloudwatch\_logs](#input\_cloudwatch\_logs) | CloudWatch logs configuration for the containers of this service. CloudWatch logs will be used as the default log configuration if Firelens is disabled and for the fluentbit and otel containers. |
capacity_provider = string
weight = string
base = optional(string, null)
}))
object({| `{}` | no | 227 | | [cluster\_id](#input\_cluster\_id) | The ECS cluster id that should run this service | `string` | n/a | yes | 228 | | [code\_build\_environment\_compute\_type](#input\_code\_build\_environment\_compute\_type) | Information about the compute resources the CodeBuild stage of the deployment pipeline will use. | `string` | `"BUILD_LAMBDA_1GB"` | no | 229 | | [code\_build\_environment\_image](#input\_code\_build\_environment\_image) | Docker image to use for the CodeBuild stage of the deployment pipeline. The image needs to include python. | `string` | `"aws/codebuild/amazonlinux-aarch64-lambda-standard:python3.12"` | no | 230 | | [code\_build\_environment\_type](#input\_code\_build\_environment\_type) | Type of build environment for the CodeBuild stage of the deployment pipeline. | `string` | `"ARM_LAMBDA_CONTAINER"` | no | 231 | | [code\_build\_log\_retention\_in\_days](#input\_code\_build\_log\_retention\_in\_days) | Log retention in days of the CodeBuild CloudWatch log group. | `number` | `7` | no | 232 | | [code\_build\_role\_name](#input\_code\_build\_role\_name) | Use an existing role for codebuild permissions that can be reused for multiple services. Otherwise a separate role for this service will be created. | `string` | `""` | no | 233 | | [code\_pipeline\_artifact\_bucket](#input\_code\_pipeline\_artifact\_bucket) | Use an existing bucket for codepipeline artifacts that can be reused for multiple services. Otherwise a separate bucket for each service will be created. | `string` | `""` | no | 234 | | [code\_pipeline\_artifact\_bucket\_sse](#input\_code\_pipeline\_artifact\_bucket\_sse) | AWS KMS master key id for server-side encryption. | `any` | `{}` | no | 235 | | [code\_pipeline\_role\_name](#input\_code\_pipeline\_role\_name) | Use an existing role for codepipeline permissions that can be reused for multiple services. Otherwise a separate role for this service will be created. 
| `string` | `""` | no | 236 | | [code\_pipeline\_type](#input\_code\_pipeline\_type) | Type of the CodePipeline. Possible values are: `V1` and `V2`. | `string` | `"V1"` | no | 237 | | [code\_pipeline\_variables](#input\_code\_pipeline\_variables) | CodePipeline variables. Valid only when `codepipeline_type` is `V2`. |
enabled = optional(bool, true)
name = optional(string, "")
retention_in_days = optional(number, 7)
})
list(object({| `[]` | no | 238 | | [codestar\_notifications\_detail\_type](#input\_codestar\_notifications\_detail\_type) | The level of detail to include in the notifications for this resource. Possible values are BASIC and FULL. | `string` | `"BASIC"` | no | 239 | | [codestar\_notifications\_event\_type\_ids](#input\_codestar\_notifications\_event\_type\_ids) | A list of event types associated with this notification rule. For list of allowed events see https://docs.aws.amazon.com/dtconsole/latest/userguide/concepts.html#concepts-api. | `list(string)` |
name = string
default_value = optional(string)
description = optional(string)
}))
[| no | 240 | | [codestar\_notifications\_kms\_master\_key\_id](#input\_codestar\_notifications\_kms\_master\_key\_id) | AWS KMS master key id for server-side encryption. | `string` | `null` | no | 241 | | [codestar\_notifications\_target\_arn](#input\_codestar\_notifications\_target\_arn) | Use an existing ARN for a notification rule target (for example, a SNS Topic ARN). Otherwise a separate sns topic for this service will be created. | `string` | `""` | no | 242 | | [container\_definition\_overwrites](#input\_container\_definition\_overwrites) | Additional container definition parameters or overwrites of defaults for your service, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html for allowed parameters. | `any` | `{}` | no | 243 | | [container\_name](#input\_container\_name) | Defaults to var.service\_name, can be overridden if it differs. Used as a target for LB. | `string` | `""` | no | 244 | | [container\_port](#input\_container\_port) | The port used by the app within the container. | `number` | n/a | yes | 245 | | [cpu](#input\_cpu) | Amount of CPU required by this service. 1024 == 1 vCPU | `number` | `256` | no | 246 | | [cpu\_architecture](#input\_cpu\_architecture) | Must be set to either `X86_64` or `ARM64`, see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform. | `string` | `"X86_64"` | no | 247 | | [create\_deployment\_pipeline](#input\_create\_deployment\_pipeline) | Creates a deploy pipeline from ECR trigger if `create_ecr_repo == true`. | `bool` | `true` | no | 248 | | [create\_ecr\_repository](#input\_create\_ecr\_repository) | Create an ECR repository for this service. | `bool` | `true` | no | 249 | | [create\_ingress\_security\_group](#input\_create\_ingress\_security\_group) | Create a security group allowing ingress from target groups to the application ports. Disable this for target groups attached to a Network Loadbalancer. 
| `bool` | `true` | no | 250 | | [deployment\_circuit\_breaker](#input\_deployment\_circuit\_breaker) | Deployment circuit breaker configuration. |
"codepipeline-pipeline-pipeline-execution-succeeded",
"codepipeline-pipeline-pipeline-execution-failed"
]
object({|
enable = bool
rollback = bool
})
{| no | 251 | | [deployment\_failure\_detection\_alarms](#input\_deployment\_failure\_detection\_alarms) | CloudWatch alarms used to detect deployment failures. |
"enable": false,
"rollback": false
}
object({|
enable = bool
rollback = bool
alarm_names = list(string)
})
{| no | 252 | | [deployment\_maximum\_percent](#input\_deployment\_maximum\_percent) | Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. | `number` | `200` | no | 253 | | [deployment\_minimum\_healthy\_percent](#input\_deployment\_minimum\_healthy\_percent) | Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. | `number` | `100` | no | 254 | | [desired\_count](#input\_desired\_count) | Desired count of services to be started/running. | `number` | `0` | no | 255 | | [ecr\_custom\_lifecycle\_policy](#input\_ecr\_custom\_lifecycle\_policy) | JSON formatted ECR lifecycle policy used for this repository (disabled the default lifecycle policy), see https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lifecycle_policy_parameters for details. | `string` | `null` | no | 256 | | [ecr\_enable\_default\_lifecycle\_policy](#input\_ecr\_enable\_default\_lifecycle\_policy) | Enables an ECR lifecycle policy for this repository which expires all images except for the last 30. | `bool` | `true` | no | 257 | | [ecr\_force\_delete](#input\_ecr\_force\_delete) | If `true`, will delete this repository even if it contains images. | `bool` | `false` | no | 258 | | [ecr\_image\_scanning\_configuration](#input\_ecr\_image\_scanning\_configuration) | n/a | `map(any)` |
"alarm_names": [],
"enable": false,
"rollback": false
}
{| no | 259 | | [ecr\_image\_tag](#input\_ecr\_image\_tag) | Tag of the new image pushed to the Amazon ECR repository to trigger the deployment pipeline. | `string` | `"production"` | no | 260 | | [ecr\_image\_tag\_mutability](#input\_ecr\_image\_tag\_mutability) | n/a | `string` | `"MUTABLE"` | no | 261 | | [ecr\_repository\_name](#input\_ecr\_repository\_name) | Existing repo to register to use with this service module, e.g. creating deployment pipelines. | `string` | `""` | no | 262 | | [efs\_volumes](#input\_efs\_volumes) | Configuration block for EFS volumes. | `any` | `[]` | no | 263 | | [enable\_execute\_command](#input\_enable\_execute\_command) | Specifies whether to enable Amazon ECS Exec for the tasks within the service. | `bool` | `false` | no | 264 | | [extra\_port\_mappings](#input\_extra\_port\_mappings) | Additional ports to be exposed from the container. |
"scan_on_push": true
}
list(object({| `[]` | no | 265 | | [firelens](#input\_firelens) | Configuration for optional custom log routing using FireLens over fluentbit sidecar. Enable `attach_init_config_s3_policy` to attach an IAM policy granting access to the init config files on S3. |
hostPort = number
containerPort = number
protocol = optional(string, "tcp")
}))
object({| `{}` | no | 266 | | [force\_new\_deployment](#input\_force\_new\_deployment) | Enable to force a new task deployment of the service. This can be used to update tasks to use a newer Docker image with same image/tag combination (e.g. myimage:latest), roll Fargate tasks onto a newer platform version, or immediately deploy ordered\_placement\_strategy and placement\_constraints updates. | `bool` | `false` | no | 267 | | [health\_check\_grace\_period\_seconds](#input\_health\_check\_grace\_period\_seconds) | Seconds to ignore failing load balancer health checks on newly instantiated tasks to prevent premature shutdown, up to 2147483647. Only valid for services configured to use load balancers. | `number` | `0` | no | 268 | | [https\_listener\_rules](#input\_https\_listener\_rules) | A list of maps describing the Listener Rules for this ALB. Required key/values: actions, conditions. Optional key/values: priority, https\_listener\_index (default to https\_listeners[count.index]) | `any` | `[]` | no | 269 | | [memory](#input\_memory) | Amount of memory [MB] is required by this service. | `number` | `512` | no | 270 | | [operating\_system\_family](#input\_operating\_system\_family) | If the `requires_compatibilities` is `FARGATE` this field is required. Must be set to a valid option from https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform. | `string` | `"LINUX"` | no | 271 | | [otel](#input\_otel) | Configuration for (optional) AWS Distro für OpenTelemetry sidecar. |
attach_init_config_s3_policy = optional(bool, false)
container_name = optional(string, "fluentbit")
container_definition = optional(any, {})
enabled = optional(bool, false)
init_config_files = optional(list(string), [])
log_level = optional(string, "info")
opensearch_host = optional(string, "")
aws_region = optional(string)
})
object({| `{}` | no | 272 | | [platform\_version](#input\_platform\_version) | The platform version on which to run your service. Defaults to LATEST. | `string` | `"LATEST"` | no | 273 | | [policy\_document](#input\_policy\_document) | AWS Policy JSON describing the permissions required for this service. | `string` | `""` | no | 274 | | [requires\_compatibilities](#input\_requires\_compatibilities) | The launch type the task is using. This enables a check to ensure that all of the parameters used in the task definition meet the requirements of the launch type. | `set(string)` |
container_definition = optional(any, {})
enabled = optional(bool, false)
})
[| no | 275 | | [security\_groups](#input\_security\_groups) | A list of security group ids that will be attached additionally to the ecs deployment. | `list(string)` | `[]` | no | 276 | | [service\_discovery\_dns\_namespace](#input\_service\_discovery\_dns\_namespace) | The ID of a Service Discovery private DNS namespace. If provided, the module will create a Route 53 Auto Naming Service to enable service discovery using Cloud Map. | `string` | `""` | no | 277 | | [service\_name](#input\_service\_name) | The service name. Will also be used as Route53 DNS entry. | `string` | n/a | yes | 278 | | [subnet\_tags](#input\_subnet\_tags) | Map of tags to identify the subnets associated with this service. Each pair must exactly match a pair on the desired subnet. Defaults to `{ Tier = public }` for services with `assign_public_ip == true` and { Tier = private } otherwise. | `map(string)` | `null` | no | 279 | | [tags](#input\_tags) | Additional tags (\_e.g.\_ { map-migrated : d-example-443255fsf }) | `map(string)` | `{}` | no | 280 | | [target\_groups](#input\_target\_groups) | A list of maps containing key/value pairs that define the target groups to be created. Order of these maps is important and the index of these are to be referenced in listener definitions. Required key/values: name, backend\_protocol, backend\_port | `any` | `[]` | no | 281 | | [task\_execution\_role\_arn](#input\_task\_execution\_role\_arn) | ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. If not provided, a default role will be created and used. | `string` | `""` | no | 282 | | [task\_role\_arn](#input\_task\_role\_arn) | ARN of the IAM role that allows your Amazon ECS container task to make calls to other AWS services. If not specified, the default ECS task role created in this module will be used. | `string` | `""` | no | 283 | | [vpc\_id](#input\_vpc\_id) | VPC id where the load balancer and other resources will be deployed. 
| `string` | n/a | yes | 284 | 285 | ## Outputs 286 | 287 | | Name | Description | 288 | |------|-------------| 289 | | [alb\_target\_group\_arn\_suffixes](#output\_alb\_target\_group\_arn\_suffixes) | ARN suffixes of the created target groups. | 290 | | [alb\_target\_group\_arns](#output\_alb\_target\_group\_arns) | ARNs of the created target groups. | 291 | | [autoscaling\_target](#output\_autoscaling\_target) | ECS auto scaling targets if auto scaling enabled. | 292 | | [cloudwatch\_log\_group](#output\_cloudwatch\_log\_group) | Name of the CloudWatch log group for container logs. | 293 | | [container\_definitions](#output\_container\_definitions) | Container definitions used by this service including all sidecars. | 294 | | [ecr\_repository\_arn](#output\_ecr\_repository\_arn) | Full ARN of the ECR repository. | 295 | | [ecr\_repository\_id](#output\_ecr\_repository\_id) | The registry ID where the repository was created. | 296 | | [ecr\_repository\_url](#output\_ecr\_repository\_url) | The URL of the repository (in the form `aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName`) | 297 | | [task\_execution\_role\_arn](#output\_task\_execution\_role\_arn) | ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. | 298 | | [task\_execution\_role\_name](#output\_task\_execution\_role\_name) | Friendly name of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. | 299 | | [task\_execution\_role\_unique\_id](#output\_task\_execution\_role\_unique\_id) | Stable and unique string identifying the IAM role that the Amazon ECS container agent and the Docker daemon can assume. | 300 | | [task\_role\_arn](#output\_task\_role\_arn) | ARN of IAM role that allows your Amazon ECS container task to make calls to other AWS services. 
| 301 | | [task\_role\_name](#output\_task\_role\_name) | Friendly name of IAM role that allows your Amazon ECS container task to make calls to other AWS services. | 302 | | [task\_role\_unique\_id](#output\_task\_role\_unique\_id) | Stable and unique string identifying the IAM role that allows your Amazon ECS container task to make calls to other AWS services. | 303 | 304 | -------------------------------------------------------------------------------- /alb.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * https://github.com/terraform-aws-modules/terraform-aws-alb 3 | */ 4 | 5 | resource "aws_alb_target_group" "main" { 6 | count = length(var.target_groups) 7 | 8 | name = lookup(var.target_groups[count.index], "name", null) 9 | name_prefix = lookup(var.target_groups[count.index], "name_prefix", null) 10 | 11 | vpc_id = var.vpc_id 12 | port = lookup(var.target_groups[count.index], "backend_port", null) 13 | protocol = lookup(var.target_groups[count.index], "backend_protocol", null) != null ? upper(lookup(var.target_groups[count.index], "backend_protocol", null)) : null 14 | protocol_version = lookup(var.target_groups[count.index], "protocol_version", null) != null ? 
upper(lookup(var.target_groups[count.index], "protocol_version", null)) : null 15 | target_type = lookup(var.target_groups[count.index], "target_type", null) 16 | 17 | deregistration_delay = lookup(var.target_groups[count.index], "deregistration_delay", null) 18 | slow_start = lookup(var.target_groups[count.index], "slow_start", null) 19 | proxy_protocol_v2 = lookup(var.target_groups[count.index], "proxy_protocol_v2", false) 20 | lambda_multi_value_headers_enabled = lookup(var.target_groups[count.index], "lambda_multi_value_headers_enabled", false) 21 | load_balancing_algorithm_type = lookup(var.target_groups[count.index], "load_balancing_algorithm_type", null) 22 | connection_termination = lookup(var.target_groups[count.index], "connection_termination", false) 23 | 24 | dynamic "health_check" { 25 | for_each = length(keys(lookup(var.target_groups[count.index], "health_check", {}))) == 0 ? [] : [ 26 | lookup(var.target_groups[count.index], "health_check", {}) 27 | ] 28 | 29 | content { 30 | enabled = lookup(health_check.value, "enabled", null) 31 | interval = lookup(health_check.value, "interval", null) 32 | path = lookup(health_check.value, "path", null) 33 | port = lookup(health_check.value, "port", null) 34 | healthy_threshold = lookup(health_check.value, "healthy_threshold", null) 35 | unhealthy_threshold = lookup(health_check.value, "unhealthy_threshold", null) 36 | timeout = lookup(health_check.value, "timeout", null) 37 | protocol = lookup(health_check.value, "protocol", null) 38 | matcher = lookup(health_check.value, "matcher", null) 39 | } 40 | } 41 | 42 | lifecycle { 43 | create_before_destroy = true 44 | } 45 | } 46 | 47 | resource "aws_alb_listener_rule" "public" { 48 | count = length(var.https_listener_rules) 49 | 50 | listener_arn = lookup(var.https_listener_rules[count.index], "listener_arn", null) 51 | priority = lookup(var.https_listener_rules[count.index], "priority", null) 52 | 53 | # authenticate-cognito actions 54 | dynamic "action" { 55 | 
for_each = [ 56 | for action_rule in var.https_listener_rules[count.index].actions : 57 | action_rule 58 | if action_rule.type == "authenticate-cognito" 59 | ] 60 | 61 | content { 62 | type = action.value["type"] 63 | authenticate_cognito { 64 | authentication_request_extra_params = lookup(action.value, "authentication_request_extra_params", null) 65 | on_unauthenticated_request = lookup(action.value, "on_authenticated_request", null) 66 | scope = lookup(action.value, "scope", null) 67 | session_cookie_name = lookup(action.value, "session_cookie_name", null) 68 | session_timeout = lookup(action.value, "session_timeout", null) 69 | user_pool_arn = action.value["user_pool_arn"] 70 | user_pool_client_id = action.value["user_pool_client_id"] 71 | user_pool_domain = action.value["user_pool_domain"] 72 | } 73 | } 74 | } 75 | 76 | # authenticate-oidc actions 77 | dynamic "action" { 78 | for_each = [ 79 | for action_rule in var.https_listener_rules[count.index].actions : 80 | action_rule 81 | if action_rule.type == "authenticate-oidc" 82 | ] 83 | 84 | content { 85 | type = action.value["type"] 86 | authenticate_oidc { 87 | # Max 10 extra params 88 | authentication_request_extra_params = lookup(action.value, "authentication_request_extra_params", null) 89 | authorization_endpoint = action.value["authorization_endpoint"] 90 | client_id = action.value["client_id"] 91 | client_secret = action.value["client_secret"] 92 | issuer = action.value["issuer"] 93 | on_unauthenticated_request = lookup(action.value, "on_unauthenticated_request", null) 94 | scope = lookup(action.value, "scope", null) 95 | session_cookie_name = lookup(action.value, "session_cookie_name", null) 96 | session_timeout = lookup(action.value, "session_timeout", null) 97 | token_endpoint = action.value["token_endpoint"] 98 | user_info_endpoint = action.value["user_info_endpoint"] 99 | } 100 | } 101 | } 102 | 103 | # redirect actions 104 | dynamic "action" { 105 | for_each = [ 106 | for action_rule in 
var.https_listener_rules[count.index].actions : 107 | action_rule 108 | if action_rule.type == "redirect" 109 | ] 110 | 111 | content { 112 | type = action.value["type"] 113 | redirect { 114 | host = lookup(action.value, "host", null) 115 | path = lookup(action.value, "path", null) 116 | port = lookup(action.value, "port", null) 117 | protocol = lookup(action.value, "protocol", null) 118 | query = lookup(action.value, "query", null) 119 | status_code = action.value["status_code"] 120 | } 121 | } 122 | } 123 | 124 | # fixed-response actions 125 | dynamic "action" { 126 | for_each = [ 127 | for action_rule in var.https_listener_rules[count.index].actions : 128 | action_rule 129 | if action_rule.type == "fixed-response" 130 | ] 131 | 132 | content { 133 | type = action.value["type"] 134 | fixed_response { 135 | message_body = lookup(action.value, "message_body", null) 136 | status_code = lookup(action.value, "status_code", null) 137 | content_type = action.value["content_type"] 138 | } 139 | } 140 | } 141 | 142 | # forward actions 143 | dynamic "action" { 144 | for_each = [ 145 | for action_rule in var.https_listener_rules[count.index].actions : 146 | action_rule 147 | if action_rule.type == "forward" 148 | ] 149 | 150 | content { 151 | type = action.value["type"] 152 | target_group_arn = aws_alb_target_group.main[lookup(action.value, "target_group_index", count.index)].id 153 | } 154 | } 155 | 156 | # Path Pattern condition 157 | dynamic "condition" { 158 | for_each = [ 159 | for condition_rule in var.https_listener_rules[count.index].conditions : 160 | condition_rule 161 | if length(lookup(condition_rule, "path_patterns", [])) > 0 162 | ] 163 | 164 | content { 165 | path_pattern { 166 | values = condition.value["path_patterns"] 167 | } 168 | } 169 | } 170 | 171 | # Host header condition 172 | dynamic "condition" { 173 | for_each = [ 174 | for condition_rule in var.https_listener_rules[count.index].conditions : 175 | condition_rule 176 | if 
length(lookup(condition_rule, "host_headers", [])) > 0 177 | ] 178 | 179 | content { 180 | host_header { 181 | values = condition.value["host_headers"] 182 | } 183 | } 184 | } 185 | 186 | # Http header condition 187 | dynamic "condition" { 188 | for_each = [ 189 | for condition_rule in var.https_listener_rules[count.index].conditions : 190 | condition_rule 191 | if length(lookup(condition_rule, "http_headers", [])) > 0 192 | ] 193 | 194 | content { 195 | dynamic "http_header" { 196 | for_each = condition.value["http_headers"] 197 | 198 | content { 199 | http_header_name = http_header.value["http_header_name"] 200 | values = http_header.value["values"] 201 | } 202 | } 203 | } 204 | } 205 | 206 | # Http request method condition 207 | dynamic "condition" { 208 | for_each = [ 209 | for condition_rule in var.https_listener_rules[count.index].conditions : 210 | condition_rule 211 | if length(lookup(condition_rule, "http_request_methods", [])) > 0 212 | ] 213 | 214 | content { 215 | http_request_method { 216 | values = condition.value["http_request_methods"] 217 | } 218 | } 219 | } 220 | 221 | # Query string condition 222 | dynamic "condition" { 223 | for_each = [ 224 | for condition_rule in var.https_listener_rules[count.index].conditions : 225 | condition_rule 226 | if length(lookup(condition_rule, "query_strings", [])) > 0 227 | ] 228 | 229 | content { 230 | dynamic "query_string" { 231 | for_each = condition.value["query_strings"] 232 | 233 | content { 234 | key = lookup(query_string.value, "key", null) 235 | value = query_string.value["value"] 236 | } 237 | } 238 | } 239 | } 240 | 241 | # Source IP address condition 242 | dynamic "condition" { 243 | for_each = [ 244 | for condition_rule in var.https_listener_rules[count.index].conditions : 245 | condition_rule 246 | if length(lookup(condition_rule, "source_ips", [])) > 0 247 | ] 248 | 249 | content { 250 | source_ip { 251 | values = condition.value["source_ips"] 252 | } 253 | } 254 | } 255 | } 256 | 
-------------------------------------------------------------------------------- /cloudwatch_logs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_log_group" "containers" { 2 | count = var.cloudwatch_logs.enabled && var.cloudwatch_logs.name == "" ? 1 : 0 3 | 4 | name = var.cloudwatch_logs.name == "" ? "/aws/ecs/${var.service_name}" : var.cloudwatch_logs.name 5 | retention_in_days = var.cloudwatch_logs.retention_in_days 6 | tags = var.tags 7 | } 8 | 9 | data "aws_iam_policy_document" "cloudwatch_logs_policy" { 10 | count = var.cloudwatch_logs.enabled && var.task_role_arn == "" ? 1 : 0 11 | 12 | statement { 13 | actions = [ 14 | "logs:CreateLogStream", 15 | "logs:CreateLogGroup", 16 | "logs:DescribeLogStreams", 17 | "logs:PutLogEvents" 18 | ] 19 | 20 | resources = [aws_cloudwatch_log_group.containers[count.index].arn] 21 | } 22 | } 23 | 24 | resource "aws_iam_policy" "cloudwatch_logs_policy" { 25 | count = var.task_role_arn == "" ? 1 : 0 26 | 27 | name = "cw-logs-access-${var.service_name}-${data.aws_region.current.name}" 28 | path = "/ecs/task-role/" 29 | policy = data.aws_iam_policy_document.cloudwatch_logs_policy[count.index].json 30 | } 31 | 32 | resource "aws_iam_role_policy_attachment" "cloudwatch_logs_policy" { 33 | count = var.cloudwatch_logs.enabled && var.task_role_arn == "" ? 1 : 0 34 | 35 | role = aws_iam_role.ecs_task_role[count.index].name 36 | policy_arn = aws_iam_policy.cloudwatch_logs_policy[count.index].arn 37 | } 38 | -------------------------------------------------------------------------------- /container_definition.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | ecr_repository_name = var.ecr_repository_name != "" ? var.ecr_repository_name : var.service_name 3 | // mandatory app container with overridable defaults 4 | app_container_defaults = { 5 | dependsOn = var.app_mesh.enabled ? 
[{ containerName = var.app_mesh.container_name, condition = "HEALTHY" }] : [] 6 | essential = true 7 | image = "${data.aws_caller_identity.current.account_id}.dkr.ecr.${data.aws_region.current.name}.amazonaws.com/${local.ecr_repository_name}:${var.ecr_image_tag}" 8 | name = var.service_name 9 | readonlyRootFilesystem = true 10 | mountPoints = [] 11 | systemControls = [] 12 | user = startswith(upper(var.operating_system_family), "WINDOWS") ? null : "0" 13 | volumesFrom = [] 14 | 15 | logConfiguration = var.firelens.enabled && var.firelens.opensearch_host != "" ? { 16 | logDriver = "awsfirelens", 17 | options = { 18 | Aws_Auth = "On" 19 | Aws_Region = null != var.firelens.aws_region ? var.firelens.aws_region : data.aws_region.current.name 20 | Host = var.firelens.opensearch_host 21 | Logstash_Format = "true" 22 | Logstash_Prefix = "${var.service_name}-app" 23 | Name = "opensearch" 24 | Port = "443" 25 | Suppress_Type_Name = "On" 26 | tls = "On" 27 | Trace_Output = "Off" 28 | } 29 | } : (var.cloudwatch_logs.enabled ? { 30 | logDriver = "awslogs" 31 | options = { 32 | awslogs-group : aws_cloudwatch_log_group.containers[0].name 33 | awslogs-region : data.aws_region.current.name 34 | awslogs-stream-prefix : "${var.service_name}-app" 35 | } 36 | } : null) 37 | 38 | # concat with var.extra_port_mappings 39 | portMappings = concat([ 40 | { 41 | hostPort = var.container_port, 42 | containerPort = var.container_port, 43 | protocol = "tcp" 44 | } 45 | ], var.extra_port_mappings) 46 | 47 | ulimits = startswith(upper(var.operating_system_family), "WINDOWS") ? 
[] : [ 48 | { 49 | name = "nofile" 50 | softLimit = 1024 * 32, // default is 1024 51 | hardLimit = 4096 * 32 // default is 4096 52 | } 53 | ] 54 | } 55 | 56 | app_container = jsonencode(module.container_definition.merged) 57 | } 58 | 59 | module "container_definition" { 60 | source = "registry.terraform.io/cloudposse/config/yaml//modules/deepmerge" 61 | version = "1.0.2" 62 | 63 | maps = [ 64 | local.app_container_defaults, 65 | var.container_definition_overwrites 66 | ] 67 | } 68 | -------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | 4 | data "aws_iam_policy" "ecs_task_execution_policy" { 5 | count = var.task_execution_role_arn == "" ? 1 : 0 6 | 7 | name = "AmazonECSTaskExecutionRolePolicy" 8 | } 9 | -------------------------------------------------------------------------------- /docs/ECS Deployer.drawio: -------------------------------------------------------------------------------- 1 |
"EC2",
"FARGATE"
]