├── .github
│   ├── integration_test.tfvars
│   └── workflows
│       └── integration_test.yml
├── .gitignore
├── README.md
└── my_project
    ├── .github
    │   └── workflows
    │       └── cd.yml
    ├── .infrastructure
    │   ├── .gitignore
    │   ├── README.md
    │   └── workspaces
    │       └── default
    │           ├── .terraform.lock.hcl
    │           ├── .tool-versions
    │           ├── code_deploy.tf
    │           ├── database.tf
    │           ├── ecr.tf
    │           ├── ecs.tf
    │           ├── gha.tf
    │           ├── main.tf
    │           ├── outputs.tf
    │           ├── secrets_manager.tf
    │           ├── variables.tf
    │           └── version.tf
    └── README.md

--------------------------------------------------------------------------------
/.github/integration_test.tfvars:
--------------------------------------------------------------------------------
github_repository = "my_org/my_project"
aws_account_id    = "test"
aws_region        = "us-east-1"
environment       = "test"
rds_db_port       = 4511
--------------------------------------------------------------------------------
/.github/workflows/integration_test.yml:
--------------------------------------------------------------------------------
name: integration-test
on:
  push:
    branches:
      - main

jobs:
  integration:
    runs-on: ubuntu-latest
    env:
      LOCALSTACK_API_KEY: ${{ secrets.LOCALSTACK_API_KEY }}
      DNS_ADDRESS: 127.0.0.1
    steps:
      - name: Checkout Branch
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup terraform
        uses: hashicorp/setup-terraform@v2

      - name: Start LocalStack
        run: |
          pip install localstack terraform-local # Install the LocalStack CLI and terraform-local
          docker pull localstack/localstack-pro  # Make sure to pull the latest version of the image

          localstack start -d                    # Start LocalStack in the background

          echo "Waiting for LocalStack startup..." # Wait 30 seconds for the LocalStack container
          localstack wait -t 30                    # to become ready before timing out
          echo "Startup complete"

      - name: Terraform apply
        run: |
          cp ./.github/integration_test.tfvars /tmp/integration_test.tfvars

          BRANCH=$(git rev-parse --abbrev-ref HEAD)
          CODEDEPLOY_COMMIT_HASH=$(git log --grep="CodeDeploy" --format='%H')

          for COMMIT_HASH in $(git rev-list --reverse $BRANCH)
          do
            git checkout -f $COMMIT_HASH

            if [ "$COMMIT_HASH" = "$CODEDEPLOY_COMMIT_HASH" ]; then
              echo "LocalStack doesn't support CodeDeploy so skipping the commit"
              continue
            fi

            if [ -f "my_project/.infrastructure/workspaces/default/version.tf" ]; then
              GIT_MESSAGE=$(git show --oneline -s)
              echo "Testing $GIT_MESSAGE"

              if [ -f "my_project/.infrastructure/workspaces/default/code_deploy.tf" ]; then
                echo "LocalStack doesn't support CodeDeploy so reverting the commit"
                git revert --no-commit $CODEDEPLOY_COMMIT_HASH
              fi

              if [[ $GIT_MESSAGE =~ "Make Github Actions trigger deployment" ]] || [[ $GIT_MESSAGE =~ "Monolith with secondary port deployment strategy" ]]; then
                echo "LocalStack has issues with continuing TF apply with this commit so we'll reset the TF env"
                localstack stop
                localstack start -d
                localstack wait -t 30
                rm my_project/.infrastructure/workspaces/default/terraform.tfstate
              fi

              cd my_project/.infrastructure/workspaces/default
              tflocal init
              tflocal apply -var-file=/tmp/integration_test.tfvars --input=false --auto-approve
              cd ../../../../
            fi
          done
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Elixir Terraform AWS ECS Example

There are some resources on Terraform and/or ECS setups with Elixir, but all of them were missing pieces I needed: they were either outdated or incomplete for my use case. So I've created this repo to show how you can set up a complete, production-ready ECS setup with Terraform.

## Features

- GitHub Actions builds and pushes to ECR
- ECS with rolling deployment
- ECS with blue-green deployment using CodeDeploy
- GitHub Actions triggers deployment
- Monolith support with multiple ports
- Secrets in Secrets Manager
- CloudWatch for logging
- RDS Postgres instance
- Elixir clustering

This includes the necessary network and permissions configuration.

## Caveat for CodeDeploy and multiple ports

One caveat to know is that AWS doesn't support multiple target groups with the CodeDeploy deployment controller. So for blue-green deployment to work with multiple ports on the instance, it's necessary to set up a separate ECS service for each port.

If you are not going to use blue-green deployment, you can instead just add a second `load_balancer` block to the `aws_ecs_service`, as sketched below.
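A minimal sketch of that alternative, assuming the default (rolling) ECS deployment controller; the resource reuses names from `ecs.tf` later in this repo, but the combined service itself is illustrative and not part of the codebase:

```hcl
# Illustrative only: with the default (rolling) deployment controller a single
# ECS service can attach to multiple target groups, one per container port.
resource "aws_ecs_service" "app" {
  name            = "${local.name}-app"
  cluster         = aws_ecs_cluster.this.id
  task_definition = aws_ecs_task_definition.app.arn
  desired_count   = 1
  launch_type     = "FARGATE"

  load_balancer {
    target_group_arn = aws_lb_target_group.app1["blue"].arn
    container_name   = local.ecs_container_name
    container_port   = 4000
  }

  # Second port on the same container; this is what the CODE_DEPLOY
  # deployment controller rejects, hence the two services in this repo.
  load_balancer {
    target_group_arn = aws_lb_target_group.app2.arn
    container_name   = local.ecs_container_name
    container_port   = 4100
  }

  network_configuration {
    security_groups  = [aws_security_group.app.id]
    subnets          = module.vpc.private_subnets
    assign_public_ip = false
  }
}
```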
## Prerequisites

It's expected that you already have an Elixir app dockerized with an `entrypoint.sh` bash script. See [`my_project/README.md`](my_project/README.md) for details.

The Dockerfile is expected to exist at `.release/Dockerfile`. Adjust the Dockerfile path in [.github/workflows/cd.yml](my_project/.github/workflows/cd.yml) if it's in a different location.

## Go by commits

To make it easier to understand what each part does, you should follow the commit history. It goes step by step through each feature.

## GitHub Actions variables

GitHub Actions needs every value from the Terraform output to be set as a repository variable for the CD workflow to work (the workflow reads them via `vars.*`).

## Elixir code changes

### Database URL

The Terraform setup uses Secrets Manager for the database credentials. Furthermore, password rotation is supported; however, that means we can't store the connection string as-is. Instead we need to build it (or, if you prefer, you can also just pass in the host/username/password directly):

```elixir
# AWS RDS rotation requires us to store db credentials in a specific format
# in Secrets Manager so we will build the DSN here
if credentials = System.get_env("DATABASE_CREDENTIALS") do
  %{
    "engine" => engine,
    "host" => host,
    "username" => username,
    "password" => password,
    "dbname" => dbname,
    "port" => port
  } = Jason.decode!(credentials)

  dsn = "#{engine}://#{URI.encode_www_form(username)}:#{URI.encode_www_form(password)}@#{host}:#{port}/#{dbname}"

  System.put_env("DATABASE_URL", dsn)
end
```
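The JSON keys decoded above follow the shape AWS RDS secret rotation uses. For context, here is a hedged sketch of how the Terraform side could populate that secret; the real definition lives in `secrets_manager.tf` (not shown above), and the secret resource name below is an assumption that simply mirrors the references in `ecs.tf` and `database.tf`:

```hcl
# Hypothetical sketch: store credentials in the JSON shape that RDS rotation
# uses and that the Elixir runtime config above decodes.
resource "aws_secretsmanager_secret_version" "db_credentials" {
  secret_id = aws_secretsmanager_secret.db_credentials.id # assumed name

  secret_string = jsonencode({
    engine   = "postgres"
    host     = aws_db_instance.db.address
    username = aws_db_instance.db.username
    password = random_password.db_password.result
    dbname   = aws_db_instance.db.db_name
    port     = local.rds_db_port
  })
}
```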
### Elixir cluster

Private DNS is used for cluster discovery. You should set up libcluster with the following `config/runtime.exs` configuration:

```elixir
case System.fetch_env("DNS_POLL_QUERY") do
  :error ->
    :ok

  {:ok, query} ->
    [node_basename, _host] = String.split(System.fetch_env!("RELEASE_NODE"), "@")

    config :libcluster,
      topologies: [
        dns: [
          strategy: Cluster.Strategy.DNSPoll,
          config: [
            polling_interval: 1_000,
            query: query,
            node_basename: node_basename]]]
end
```
--------------------------------------------------------------------------------
/my_project/.github/workflows/cd.yml:
--------------------------------------------------------------------------------
name: CD
on:
  push:
    branches:
      - main

# This is necessary for GitHub Actions OIDC
permissions:
  id-token: write
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    outputs:
      aws_region: ${{ vars.AWS_REGION }}
      docker_image: ${{ steps.ecr-login.outputs.registry }}/${{ vars.AWS_ECR_REPO }}@${{ steps.docker-build.outputs.digest }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Setup AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: ${{ vars.AWS_BUILD_ROLE }}
          aws-region: ${{ vars.AWS_REGION }}
          mask-aws-account-id: 'no'

      - name: Login to AWS repository
        id: ecr-login
        uses: aws-actions/amazon-ecr-login@v1

      # The following prepends the .release/entrypoint.sh shell script with the RELEASE_NODE env var.
      # Change the path if the entrypoint shell script is located in a different place.
      #
      # Please make sure that `curl` and `jq` are installed in the release image.
      - name: Add RELEASE_NODE variable in entrypoint.sh
        run: |
          mv .release/entrypoint.sh /tmp/entrypoint.sh
          cat - /tmp/entrypoint.sh <<'SH' > .release/entrypoint.sh
          export RELEASE_DISTRIBUTION=name
          export RELEASE_NODE=node-${{ github.sha }}@`curl -s $ECS_CONTAINER_METADATA_URI_V4 | jq -r ".Networks[0].IPv4Addresses[0]"`

          SH

      - name: Build image metadata
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ steps.ecr-login.outputs.registry }}/${{ vars.AWS_ECR_REPO }}
          # The release will be tagged with the branch and the short git sha (and latest if on main)
          tags: |
            type=sha,prefix={{branch}}-
            type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'main') }}

      - name: Build, tag, and push to ECR
        id: docker-build
        uses: docker/build-push-action@v3
        with:
          context: .
          # Replace this if the dockerfile is at a different path
          file: .release/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

  deploy:
    runs-on: ubuntu-latest
    concurrency: deployment
    env:
      DOCKER_IMAGE: ${{ needs.build.outputs.docker_image }}
    needs:
      - build
    steps:
      - name: Setup AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: ${{ vars.AWS_DEPLOY_ROLE }}
          aws-region: ${{ vars.AWS_REGION }}

      # This will download the task definition template managed by Terraform
      # and modify the family and image attributes.
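      # Note that the template's family is "<service>-template" (managed by
      # Terraform in ecs.tf), while the revisions registered here use the plain
      # "<service>" family, so pipeline-managed revisions never collide with
      # the Terraform-managed template.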
      - name: Build ECS task definition
        run: |
          aws ecs describe-task-definition --task-definition ${{ vars.AWS_SERVICE_NAME }}-template --query taskDefinition > task-definition.json
          echo "$(jq '.family="${{ vars.AWS_SERVICE_NAME }}" | .containerDefinitions[0].image="${{ env.DOCKER_IMAGE }}"' task-definition.json)" > task-definition.json

      - name: Build CodeDeploy app spec
        run: |
          cat <<SPEC >> apps-spec.yaml

          version: 1

          Resources:
            - TargetService:
                Type: AWS::ECS::Service
                Properties:
                  TaskDefinition: "Placeholder: GitHub Actions will fill this in"
                  LoadBalancerInfo:
                    ContainerName: "${{ vars.AWS_SERVICE_NAME }}"
                    ContainerPort: 4000
          SPEC

      - name: Deploy app1
        uses: aws-actions/amazon-ecs-deploy-task-definition@v1
        with:
          task-definition: task-definition.json
          cluster: ${{ vars.AWS_SERVICE_NAME }}
          service: ${{ vars.AWS_SERVICE_NAME }}-app1
          codedeploy-appspec: apps-spec.yaml
          codedeploy-application: ${{ vars.AWS_SERVICE_NAME }}
          codedeploy-deployment-group: ${{ vars.AWS_SERVICE_NAME }}
          wait-for-service-stability: true

      - name: Deploy app2
        uses: aws-actions/amazon-ecs-deploy-task-definition@v1
        with:
          task-definition: task-definition.json
          cluster: ${{ vars.AWS_SERVICE_NAME }}
          service: ${{ vars.AWS_SERVICE_NAME }}-app2
          wait-for-service-stability: true
--------------------------------------------------------------------------------
/my_project/.infrastructure/.gitignore:
--------------------------------------------------------------------------------
**/.terraform/*
*.tfstate
*.tfstate.*
--------------------------------------------------------------------------------
/my_project/.infrastructure/README.md:
--------------------------------------------------------------------------------
## Infrastructure

This directory contains all IaC files.
--------------------------------------------------------------------------------
/my_project/.infrastructure/workspaces/default/.terraform.lock.hcl:
--------------------------------------------------------------------------------
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/aws" {
  version     = "4.53.0"
  constraints = "~> 4.46"
  hashes = [
    "h1:P6ZZ716SRIimw0t/SAgYbOMZtO0HDvwVQKxyHEW6aaE=",
    "zh:0d44171544a916adf0fa96b7d0851a49d8dec98f71f0229dfd2d178958b3996b",
    "zh:16945808ce26b86af7f5a77c4ab1154da786208c793abb95b8f918b4f48daded",
    "zh:1a57a5a30cef9a5867579d894b74f60bb99afc7ca0d030d49a80ad776958b428",
    "zh:2c718734ae17430d7f598ca0b4e4f86d43d66569c72076a10f4ace3ff8dfc605",
    "zh:46fdf6301cb2fa0a4d122d1a8f75f047b6660c24851d6a4537ee38926a86485d",
    "zh:53a53920b38a9e1648e85c6ee33bccf95bfcd067bffc4934a2af55621e6a6bd9",
    "zh:548d927b234b1914c43169224b03f641d0961a4e312e5c6508657fce27b66db4",
    "zh:57c847b2a5ae41ddea20b18ef006369d36bfdc4dec7f542f60e22a47f7b6f347",
    "zh:79f7402b581621ba69f5a07ce70299735c678beb265d114d58955d04f0d39f87",
    "zh:8970109a692dc4ecbda98a0969da472da4759db90ce22f2a196356ea85bb2cf7",
    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
    "zh:a500cc4ffcad854dec0cf6f97751930a53c9f278f143a4355fa8892aa77c77bf",
    "zh:b687c20b42a8b9e9e9f56c42e3b3c6859c043ec72b8907a6e4d4b64068e11df5",
    "zh:e2c592e96822b78287554be43c66398f658c74c4ae3796f6b9e6d4b0f1f7f626",
    "zh:ff1c4a46fdc988716c6fc28925549600093fc098828237cb1a30264e15cf730f",
  ]
}

provider "registry.terraform.io/hashicorp/random" {
  version = "3.4.3"
  hashes = [
    "h1:saZR+mhthL0OZl4SyHXZraxyaBNVMxiZzks78nWcZ2o=",
    "zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752",
    "zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b",
    "zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53",
    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
    "zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3",
    "zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5",
    "zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda",
    "zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6",
    "zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1",
    "zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d",
    "zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8",
    "zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93",
  ]
}

provider "registry.terraform.io/integrations/github" {
  version     = "5.14.0"
  constraints = "~> 5.12"
  hashes = [
    "h1:5eGFHxGifts5hfBMHMl0Nv6G0DV+0ZMign7aRcHYBas=",
    "zh:108b33597b602b5b27c80437614f199719d7fba2c692c098665357ffa1453d62",
    "zh:122ecea6dfeaf4f05598fbce6a3b6d2ddc691b617a7eb36a667822bc3acc2029",
    "zh:358948f71f43f4919c2070620e81d6ec50b40cf7fb5690d0b81482f74910bddd",
    "zh:3749ae30da070fc30525cc036e00ff6046ff78df8c06642ba138f4183849d363",
    "zh:48dd9d69bfa13dcd75cae192e0d3445f6d37b7ad21726ec5aea3001896ed658a",
    "zh:5c6ef4dd8c510d8f18956956fce3756c5d68976e2d6b53be891e237cb5f0565c",
    "zh:5dd3202ed417307345f0f3310fdfb3033cd99f4f87fa336a3b6e338ef2c6888b",
    "zh:8469392a627df37e248240f280bbf59e257744768492778c616fa1c613c9c637",
    "zh:87d7c70b538c71c19a837684d14329dcb69751051b41896b2d2b91ec9aebda39",
    "zh:8a8781279a01c9d2e3633f7247ac3ddeb697e00b33c06168eb2c6a2a67dfb3a9",
    "zh:8ef7319c26b271f9d48cf4a4075e11cf904f43a5d8ac0cfe7d05049c77c32716",
    "zh:9d7ee91e9ceddf1b753e7923b0a017178398b7d5ce1dc08f6aae3d9de747f262",
"zh:a8cdf17c25b6d77c0a1498507747c8aa26f3042b01bc67f89ab3d388ca39ec19", 64 | "zh:ba3393c640dc69644c00d4735b38ac862e7f1e3b1461ba66724feb871380a1fb", 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /my_project/.infrastructure/workspaces/default/.tool-versions: -------------------------------------------------------------------------------- 1 | python 3.11.1 2 | -------------------------------------------------------------------------------- /my_project/.infrastructure/workspaces/default/code_deploy.tf: -------------------------------------------------------------------------------- 1 | resource "aws_codedeploy_app" "this" { 2 | name = local.name 3 | compute_platform = "ECS" 4 | } 5 | 6 | resource "aws_codedeploy_deployment_group" "this" { 7 | app_name = aws_codedeploy_app.this.name 8 | deployment_group_name = local.name 9 | deployment_config_name = "CodeDeployDefault.ECSAllAtOnce" 10 | service_role_arn = aws_iam_role.code_deploy_ecs.arn 11 | 12 | deployment_style { 13 | deployment_option = "WITH_TRAFFIC_CONTROL" 14 | deployment_type = "BLUE_GREEN" 15 | } 16 | 17 | auto_rollback_configuration { 18 | enabled = true 19 | events = ["DEPLOYMENT_FAILURE"] 20 | } 21 | 22 | blue_green_deployment_config { 23 | deployment_ready_option { 24 | action_on_timeout = "CONTINUE_DEPLOYMENT" 25 | } 26 | 27 | terminate_blue_instances_on_deployment_success { 28 | action = "TERMINATE" 29 | termination_wait_time_in_minutes = 5 30 | } 31 | } 32 | 33 | ecs_service { 34 | cluster_name = aws_ecs_cluster.this.name 35 | service_name = aws_ecs_service.app1.name 36 | } 37 | 38 | load_balancer_info { 39 | target_group_pair_info { 40 | prod_traffic_route { 41 | listener_arns = [aws_lb_listener.app1.arn] 42 | } 43 | 44 | dynamic "target_group" { 45 | for_each = aws_lb_target_group.app1 46 | 47 | content { 48 | name = target_group.value.name 49 | } 50 | } 51 | } 52 | } 53 | } 54 | 55 | data "aws_iam_policy" "limited_code_deploy_default" { 56 | name = "AWSCodeDeployRoleForECSLimited" 57 | } 58 | 59 | resource "aws_iam_role" "code_deploy_ecs" { 60 | name = "${local.name}-code-deploy-ecs" 61 | assume_role_policy = data.aws_iam_policy_document.assume_code_deploy.json 62 | } 63 | 64 | resource "aws_iam_role_policy" "pass_ecs_roles" { 65 | role = aws_iam_role.code_deploy_ecs.id 66 | policy = data.aws_iam_policy_document.code_deploy_ecs.json 67 | } 68 | 69 | resource "aws_iam_role_policy_attachment" "limited_code_deploy_default" { 70 | role = aws_iam_role.code_deploy_ecs.id 71 | policy_arn = data.aws_iam_policy.limited_code_deploy_default.arn 72 | } 73 | 74 | data "aws_iam_policy_document" "assume_code_deploy" { 75 | statement { 76 | principals { 77 | type = "Service" 78 | identifiers = ["codedeploy.amazonaws.com"] 79 | } 80 | actions = ["sts:AssumeRole"] 81 | effect = "Allow" 82 | } 83 | } 84 | 85 | data "aws_iam_policy_document" "code_deploy_ecs" { 86 | statement { 87 | sid = "PassRolesInTaskDefinition" 88 | effect = "Allow" 89 | actions = ["iam:PassRole"] 90 | resources = [ 91 | aws_iam_role.ecs_task_execution.arn, 92 | aws_iam_role.ecs_task_app.arn 93 | ] 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /my_project/.infrastructure/workspaces/default/database.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | rds_db_port = var.rds_db_port == null ? 
# Create a security group for the RDS instance
resource "aws_security_group" "db" {
  name        = "${local.name}-db"
  description = "RDS security group"
  vpc_id      = module.vpc.vpc_id

  # Allow access from the app
  ingress {
    from_port       = local.rds_db_port
    to_port         = local.rds_db_port
    security_groups = [aws_security_group.app.id]
    protocol        = "tcp"
  }

  egress {
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}

# Create RDS subnet group
resource "aws_db_subnet_group" "db" {
  name       = "${local.name}-rds-database"
  subnet_ids = module.vpc.private_subnets
}

# Create RDS instance
resource "aws_db_instance" "db" {
  identifier              = "${local.name}-database"
  username                = var.rds_db_username
  password                = random_password.db_password.result
  db_name                 = replace(local.name, "-", "_")
  allocated_storage       = 20
  storage_type            = "gp2"
  engine                  = "postgres"
  engine_version          = "14.5"
  port                    = local.rds_db_port
  instance_class          = var.rds_instance_type
  backup_retention_period = 7
  publicly_accessible     = false
  storage_encrypted       = var.rds_encrypt_at_rest
  multi_az                = false
  db_subnet_group_name    = aws_db_subnet_group.db.name
  vpc_security_group_ids  = [aws_security_group.db.id]
  skip_final_snapshot     = true
}
--------------------------------------------------------------------------------
/my_project/.infrastructure/workspaces/default/ecr.tf:
--------------------------------------------------------------------------------
resource "aws_ecr_repository" "this" {
  name = var.github_repository
}

# The following VPC endpoints are necessary to allow
# access to the ECR repo in the subnet

resource "aws_vpc_endpoint" "s3" {
  vpc_id            = module.vpc.vpc_id
  service_name      = "com.amazonaws.${var.aws_region}.s3"
  vpc_endpoint_type = "Gateway"
  route_table_ids   = module.vpc.private_route_table_ids

  tags = {
    Name = "s3-endpoint"
  }
}

resource "aws_vpc_endpoint" "ecr_dkr" {
  vpc_id              = module.vpc.vpc_id
  private_dns_enabled = true
  service_name        = "com.amazonaws.${var.aws_region}.ecr.dkr"
  vpc_endpoint_type   = "Interface"
  security_group_ids  = [aws_security_group.vpc_endpoint.id]
  subnet_ids          = module.vpc.private_subnets

  tags = {
    Name = "ecr-dkr-endpoint"
  }
}

resource "aws_vpc_endpoint" "ecr_api" {
  vpc_id              = module.vpc.vpc_id
  private_dns_enabled = true
  service_name        = "com.amazonaws.${var.aws_region}.ecr.api"
  vpc_endpoint_type   = "Interface"
  security_group_ids  = [aws_security_group.vpc_endpoint.id]
  subnet_ids          = module.vpc.private_subnets

  tags = {
    Name = "ecr-api-endpoint"
  }
}
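# Editorial note (assumption: the VPC is set up without a NAT gateway, so
# private subnets have no route to the internet): Fargate tasks in the private
# subnets can only pull images through these endpoints. ECR stores image
# layers in S3, which is why the S3 gateway endpoint is required alongside the
# two ECR interface endpoints; the same reasoning applies to the CloudWatch
# Logs endpoint defined in ecs.tf.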
--------------------------------------------------------------------------------
/my_project/.infrastructure/workspaces/default/ecs.tf:
--------------------------------------------------------------------------------
locals {
  ecs_container_name = local.name
}

# Generate a release cookie for distributed Elixir nodes
resource "random_password" "release_cookie" {
  length  = 64
  special = true
  numeric = true
  upper   = true
  lower   = true
}

# Generate a random secret key base
resource "random_password" "secret_key_base" {
  length  = 64
  special = true
  numeric = true
  upper   = true
  lower   = true
}

# Create a security group for the load balancer
resource "aws_security_group" "lb" {
  name        = "${local.name}-load-balancer"
  description = "Controls access to the load balancer"
  vpc_id      = module.vpc.vpc_id

  # app1
  ingress {
    from_port        = 80
    to_port          = 80
    protocol         = "tcp"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  # app2
  ingress {
    from_port        = 8080
    to_port          = 8080
    protocol         = "tcp"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  egress {
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}

# Create a security group for the app containers
resource "aws_security_group" "app" {
  name        = "${local.name}-application"
  description = "Allow access from load balancer"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port       = 0
    to_port         = 0
    protocol        = "-1"
    security_groups = [aws_security_group.lb.id]
  }

  ingress {
    description = "Allow Elixir cluster to connect"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    self        = true
  }

  egress {
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}

# Application load balancer
resource "aws_lb" "this" {
  name               = local.name
  internal           = false
  load_balancer_type = "application"
  security_groups    = [aws_security_group.lb.id]
  subnets            = module.vpc.public_subnets
}

# Target groups for the load balancer for app1 (blue/green pair for CodeDeploy)
resource "aws_lb_target_group" "app1" {
  for_each    = toset(["blue", "green"])
  name        = "${local.name}-app1-${each.key}"
  port        = 80
  protocol    = "HTTP"
  vpc_id      = module.vpc.vpc_id
  target_type = "ip"

  health_check {
    enabled = true
    path    = "/"
    matcher = "200"
  }
}

# Target group for the load balancer for app2
resource "aws_lb_target_group" "app2" {
  name        = "${local.name}-app2"
  port        = 8080
  protocol    = "HTTP"
  vpc_id      = module.vpc.vpc_id
  target_type = "ip"

  health_check {
    enabled = true
    path    = "/"
    matcher = "200"
  }
}

# Associate the load balancer with the app1 target group via a listener
resource "aws_lb_listener" "app1" {
  load_balancer_arn = aws_lb.this.arn
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.app1["blue"].arn
  }

  lifecycle {
    ignore_changes = [
      default_action # This will be controlled by CodeDeploy
    ]
  }
}

# Associate the load balancer with the app2 target group via a listener
resource "aws_lb_listener" "app2" {
  load_balancer_arn = aws_lb.this.arn
  port              = 8080
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.app2.arn
  }
}

# CloudWatch log group
resource "aws_cloudwatch_log_group" "group" {
  name = "/ecs/${local.name}"
}

# Create the ECS cluster
resource "aws_ecs_cluster" "this" {
  name = local.name
}

# Create the ECS task definition template
resource "aws_ecs_task_definition" "app" {
  # The GitHub CD pipeline will use this template for the GitHub Actions managed
  # task definition. Any modifications to this template will be reflected
  # in the next deployment (you may want to manually trigger it after terraform
  # apply).
  family                   = "${aws_ecs_cluster.this.name}-template"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  cpu                      = 1024
  memory                   = 2048

  execution_role_arn = aws_iam_role.ecs_task_execution.arn
  task_role_arn      = aws_iam_role.ecs_task_app.arn

  container_definitions = jsonencode([
    {
      essential = true
      image     = "TO_BE_REPLACED"
      name      = local.ecs_container_name

      portMappings = [
        # app1 port
        {
          containerPort = 4000
          hostPort      = 4000
          protocol      = "tcp"
        },

        # app2 port
        {
          containerPort = 4100
          hostPort      = 4100
          protocol      = "tcp"
        }
      ]

      environment = [
        # The following config can be used if you don't want to specify a
        # domain and instead just use whatever DNS hostname the load balancer has.
        # In a Phoenix app you could configure the url at runtime with:
        #
        #   uri = URI.parse(System.get_env("MY_APP_HOST", "example.com"))
        #
        #   config :my_app_web, MyAppWeb.Endpoint,
        #     url: [host: uri.host, port: uri.port || 81],
        #
        # {
        #   name  = "MY_APP_HOST"
        #   value = aws_lb.this.dns_name
        # }
        {
          name  = "DNS_POLL_QUERY"
          value = "${local.ecs_container_name}.${aws_service_discovery_private_dns_namespace.app.name}"
        }
      ]

      secrets = [
        {
          name      = "RELEASE_COOKIE"
          valueFrom = aws_secretsmanager_secret_version.release_cookie.arn
        },
        {
          name      = "SECRET_KEY_BASE"
          valueFrom = aws_secretsmanager_secret_version.secret_key_base.arn
        },
        {
          name      = "DATABASE_CREDENTIALS"
          valueFrom = aws_secretsmanager_secret_version.db_credentials.arn
        }
      ]

      logConfiguration = {
        logDriver = "awslogs"
        options = {
          awslogs-group         = aws_cloudwatch_log_group.group.name
          awslogs-region        = var.aws_region
          awslogs-stream-prefix = "stdout"
        }
      }
    }
  ])
}

# Create the app1 ECS service
resource "aws_ecs_service" "app1" {
  name            = "${local.name}-app1"
  cluster         = aws_ecs_cluster.this.id
  task_definition = aws_ecs_task_definition.app.arn
  desired_count   = 1
  launch_type     = "FARGATE"

  deployment_controller {
    type = "CODE_DEPLOY"
  }

  load_balancer {
    target_group_arn = aws_lb_target_group.app1["blue"].arn
    container_name   = local.ecs_container_name
    container_port   = 4000
  }

  network_configuration {
    security_groups  = [aws_security_group.app.id]
    subnets          = module.vpc.private_subnets
    assign_public_ip = false
  }

  # Enable Elixir cluster discovery
  service_registries {
    registry_arn   = aws_service_discovery_service.app.arn
    container_name = local.ecs_container_name
  }

  lifecycle {
    ignore_changes = [
      task_definition, # Managed by the GitHub CD pipeline
      load_balancer    # Managed by CodeDeploy for blue-green deployments
    ]
  }
}
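# Editorial note: app2 is a second service for the same task definition
# because the CODE_DEPLOY deployment controller only supports a single target
# group (see the caveat in the root README). app2 uses the default rolling
# controller, so it can expose the second container port (4100) directly.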

# Create the app2 ECS service
resource "aws_ecs_service" "app2" {
  name            = "${local.name}-app2"
  cluster         = aws_ecs_cluster.this.id
  task_definition = aws_ecs_task_definition.app.arn
  desired_count   = 1
  launch_type     = "FARGATE"

  load_balancer {
    target_group_arn = aws_lb_target_group.app2.arn
    container_name   = local.ecs_container_name
    container_port   = 4100
  }

  network_configuration {
    security_groups  = [aws_security_group.app.id]
    subnets          = module.vpc.private_subnets
    assign_public_ip = false
  }

  # Enable Elixir cluster discovery
  service_registries {
    registry_arn   = aws_service_discovery_service.app.arn
    container_name = local.ecs_container_name
  }

  lifecycle {
    ignore_changes = [
      task_definition # Managed by the GitHub CD pipeline
    ]
  }
}

# Allows the application container to pass logs to CloudWatch
resource "aws_vpc_endpoint" "logs" {
  vpc_id              = module.vpc.vpc_id
  private_dns_enabled = true
  service_name        = "com.amazonaws.${var.aws_region}.logs"
  vpc_endpoint_type   = "Interface"
  security_group_ids  = [aws_security_group.vpc_endpoint.id]
  subnet_ids          = module.vpc.private_subnets

  tags = {
    Name = "logs-endpoint"
  }
}

# ECS execution and task roles
data "aws_iam_policy_document" "assume_ecs" {
  statement {
    actions = ["sts:AssumeRole"]
    principals {
      type        = "Service"
      identifiers = ["ecs.amazonaws.com", "ecs-tasks.amazonaws.com"]
    }
  }
}

resource "aws_iam_policy" "ecs_task_execution" {
  name   = "${local.name}-ecs-task-execution"
  policy = <
…
--------------------------------------------------------------------------------
/my_project/README.md:
--------------------------------------------------------------------------------
…
```bash
cat << SH > .release/entrypoint.sh
bin/my_project eval "MyProject.Release.migrate()"
bin/my_project start
SH
```

Now you are ready to deploy to AWS!
--------------------------------------------------------------------------------