├── .DS_Store
├── .gitignore
├── README.md
├── architecture.png
├── jupiter-website-ecs
├── .DS_Store
├── .terraform.lock.hcl
├── backend.tf
├── main.tf
└── variables.tf
└── modules
├── .DS_Store
├── alb
├── main.tf
├── outputs.tf
└── variables.tf
├── asg
├── main.tf
└── variables.tf
├── ecs-tasks-execution-role
├── main.tf
├── outputs.tf
└── variables.tf
├── ecs
├── main.tf
├── outputs.tf
└── variables.tf
├── nat-gateway
├── main.tf
└── variables.tf
├── security-groups
├── main.tf
├── outputs.tf
└── variables.tf
└── vpc
├── main.tf
├── outputs.tf
└── variables.tf
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoyGeagea/aws-ecs-static-website-terraform/94576a1c90c918a0ac4eddcb814ce854b3b1775d/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 | crash.*.log
11 |
12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
13 | # password, private keys, and other secrets. These should not be part of version
14 | # control as they are data points which are potentially sensitive and subject
15 | # to change depending on the environment.
16 | *.tfvars
17 | *.tfvars.json
18 |
19 | # Ignore override files as they are usually used to override resources locally and so
20 | # are not checked in
21 | override.tf
22 | override.tf.json
23 | *_override.tf
24 | *_override.tf.json
25 |
26 | # Include override files you do wish to add to version control using negated pattern
27 | # !example_override.tf
28 |
29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
30 | # example: *tfplan*
31 |
32 | # Ignore CLI configuration files
33 | .terraformrc
34 | terraform.rc
35 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AWS - Host static website using Elastic Container Service (ECS) and Terraform
2 |
3 | ## Overview
4 | In this Lab, we are going to develop a static website and deploy it on AWS Elastic Container Service using Terraform. The architecture comprises the following components and services to ensure high availability and fault tolerance:
5 | 
6 |
7 | ## Prerequisites
8 | - Have basic Docker knowledge
9 | - Have basic AWS knowledge
10 | - Have basic Terraform knowledge
11 | - Developed the following project before: https://github.com/RoyGeagea/aws-ecs-static-website
12 |
13 | ## Get Started
14 |
15 | ### 1- Install Terraform
16 | Visit https://developer.hashicorp.com/terraform/downloads?product_intent=terraform
17 |
18 | ### 2- Create Github free account
19 | Visit https://github.com
20 |
21 | ### 3- Install Git
22 | Visit https://git-scm.com/book/en/v2/Getting-Started-Installing-Git
23 |
24 | ### 4- Create SSH key
25 | Visit https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent
26 |
27 | ### 5- Add the SSH key to your GitHub account
28 | Visit https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account
29 |
30 | ### 6- Install Visual Studio Code
31 | Visit https://code.visualstudio.com/download
32 |
33 | ### 7- Install Terraform extensions on Visual Studio Code
34 | To install the Terraform extensions in Visual Studio Code, follow these steps:
35 |
36 | 1) Open Visual Studio.
37 | 2) Go to the "Extensions" section from the left bar
38 | 3) Search and install the following 2 extensions
39 | - `HashiCorp Terraform` extension by HashiCorp
40 | - `Terraform` by Anton Kulikov
41 |
42 | ### 8- Install AWS CLI
43 | Visit https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
44 |
45 | ### 9- Create an IAM User with Admin Policy
46 | 1) Open the IAM (Identity and Access Management) console.
47 | 2) In the IAM console, click on "Users" in the left sidebar.
48 | 3) Click on the "Add user" button to create a new IAM user.
49 | 4) Provide a username for the new IAM user. Enable programmatic access.
50 | 5) Under "Set permissions", select "Attach existing policies directly".
51 | 6) In the policy search bar, type "AdministratorAccess" and select the policy named "AdministratorAccess" from the list. This policy grants full administrative access to the user.
52 | 7) Click on the "Next: Tags" button if you want to add tags to the user (optional).
53 | 8) Click on the "Next: Review" button to review the user configuration.
54 | 9) Review the user details and permissions to ensure they are correct.
55 | 10) Once you have reviewed the settings, click on the "Create user" button.
56 |
57 | The IAM user with the Admin policy is now created. On the final screen, you will see the user details, including the user's access key ID and secret access key if programmatic access was enabled. Make sure to securely store these credentials, as they will be needed to access AWS services programmatically.
58 |
59 | ### 10- Set up AWS CLI with the newly created user
60 | To set up AWS CLI with the newly created IAM user, follow these steps:
61 |
62 | 1) Open a terminal or command prompt on your local machine.
63 | 2) Run the following command to configure the AWS CLI:
64 | ```console
65 | $ aws configure
66 | ```
67 | You will be prompted to enter the AWS Access Key ID and Secret Access Key for the IAM user. Enter the values obtained during the creation of the IAM user with the Admin policy.
68 |
69 | When prompted for the Default region name, enter the AWS region where you want to perform your operations (e.g., us-east-1, eu-west-2). You can find the region codes in the AWS documentation.
70 |
71 | For the Default output format, you can leave it blank or enter a preferred format such as json, text, or table. The default value is json.
72 |
73 | Once you have entered the required information, the AWS CLI will be configured with the IAM user's credentials.
74 | To test the configuration, you can run a simple AWS CLI command, such as:
75 | ```console
76 | $ aws s3 ls
77 | ```
78 | This command lists the S3 buckets in your AWS account. If the configuration is successful, you should see the list of S3 buckets associated with your account.
79 |
80 | Now you're ready to use the AWS CLI with the IAM user's credentials to interact with your AWS resources.
81 |
82 | ### 11- Create S3 bucket to store Terraform state file
83 | Terraform maintains a record of resource states in a dedicated file known as the state file. By persistently storing the state of deployed resources, Terraform enables efficient subsequent deployments by leveraging the knowledge of the previous resource states. This allows Terraform to accurately determine the necessary changes and updates required to align the infrastructure with the desired configuration.
84 |
85 | Let's create an S3 bucket with versioning enabled to store the Terraform state file:
86 |
87 | 1) Open the Amazon S3 console by searching for "S3" and click on the "Create bucket" button.
88 | 2) Provide a unique and meaningful name for your bucket. Bucket names must be globally unique across all AWS accounts.
89 | 3) Select the AWS Region where you want to create the bucket.
90 | 4) Under "Configure options" leave the default settings as they are.
91 | 5) Scroll down to the "Set permissions" section and expand it.
92 | 6) Leave the default settings for "Block all public access" and "Bucket versioning" enabled. `Bucket versioning is necessary for Terraform state file management.`
93 | 7) Scroll down and click on the "Create bucket" button to create the S3 bucket.
94 |
95 | ### 12- Deploy the web server
96 | The Terraform code for this project is hosted in this repository, and it's made of [modules](modules) and [jupiter-website-ecs](jupiter-website-ecs) folders.
97 |
98 | To deploy the web server:
99 | 1) Enter the `jupiter-website-ecs` folder, by executing the below command:
100 | ```console
101 | $ cd jupiter-website-ecs
102 | ```
103 | 2) Create a `terraform.tfvars` file in this folder (it is excluded from version control by `.gitignore`) that assigns a value to every variable declared in `variables.tf`, and set the `container_image` variable to the URI of your ECR Image. For detailed instructions, you can refer to the documentation available at https://github.com/RoyGeagea/aws-ecs-static-website.
104 | 3) Next, navigate to the `backend.tf` file. Update the value associated with the S3 bucket you intend to use for this project. Make sure to enter the correct bucket name and any other relevant configurations necessary for your deployment.
105 | 4) Deploy the infrastructure:
106 | ```console
107 | $ terraform init
108 | $ terraform apply
109 | ```
110 | 5) Wait a few minutes until the ECS service is deployed, and then test using the ALB DNS name.
111 |
112 | To destroy the infrastructure you can execute:
113 | ```console
114 | $ terraform destroy
115 | ```
116 |
117 | ## Summary
118 |
119 | Terraform revolutionizes infrastructure provisioning with its declarative approach and automation capabilities. Seamlessly integrating with AWS, Terraform empowers users to create and modify cloud resources efficiently. By leveraging Terraform's infrastructure-as-code paradigm, the project achieved streamlined deployments, scalability, and rapid application delivery. The benefits include enhanced efficiency, reduced errors, infrastructure standardization, and the ability to easily replicate and manage infrastructure across environments. With Terraform, organizations can focus on innovation and application development while harnessing the power of cloud-native infrastructure.
120 |
--------------------------------------------------------------------------------
/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoyGeagea/aws-ecs-static-website-terraform/94576a1c90c918a0ac4eddcb814ce854b3b1775d/architecture.png
--------------------------------------------------------------------------------
/jupiter-website-ecs/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoyGeagea/aws-ecs-static-website-terraform/94576a1c90c918a0ac4eddcb814ce854b3b1775d/jupiter-website-ecs/.DS_Store
--------------------------------------------------------------------------------
/jupiter-website-ecs/.terraform.lock.hcl:
--------------------------------------------------------------------------------
1 | # This file is maintained automatically by "terraform init".
2 | # Manual edits may be lost in future updates.
3 |
4 | provider "registry.terraform.io/hashicorp/aws" {
5 | version = "5.4.0"
6 | hashes = [
7 | "h1:ny1YPz2LiHTasDVNh6/HEvh1c9+TN/ftgAHh84bmy1E=",
8 | "zh:1db5f81089216831bb0fdff9ddc3772efa133397c66ec276bc75b96eec06e23f",
9 | "zh:26fe5fdf399192b5724d21854fbec650c158f8ee9eb1dc52a50f7da0f2bc07ac",
10 | "zh:2946d9e333b1efe01588ee9f9771169fd3c3a4a7cb78ed8f91e8b3efd1a73850",
11 | "zh:36ed69e8d3029332c8a52a70940f714fd579b9fd95f5569cc010ef11162f5bf7",
12 | "zh:46ba5ad1c3a3ef98c346356cfa4bdd9c2501c661c2513bb92f4413f2482fb24b",
13 | "zh:46c10aaa9672b54a14b0e0effdd6ecd9b8a539b3bfe273ac54111e7352a7bb4b",
14 | "zh:47d7f57bcbe4fba2f960ab6c4228c5e9e586be2f233a8baa8962b51a63337179",
15 | "zh:47e41c198439ba1c4d933f808b6f47e518f8f0aae25ca42abcac97f149121e90",
16 | "zh:526c5834de71654ee14039cb973322bf5032cb684a2a113b48fb48a0584f46f3",
17 | "zh:6169316517b95677819ba2904dcea204fb9b55e868348e906af9164104fe7198",
18 | "zh:7c063ef2b8d69a8db7e8bf0dcd45793ede22b259b30464ed114d330df304cdbb",
19 | "zh:87c4f2faca636715a08be3121d26b3354415401eab89349077ca9436a0822c23",
20 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
21 | "zh:b184b8a268f45258edd27d389ca793708f1bc3ee4d6706d154a45e93deaddde1",
22 | "zh:ba1a998cbf4b639fa3e04b9069f0f5a289662457940726a8a51c81df400aa852",
23 | ]
24 | }
25 |
--------------------------------------------------------------------------------
/jupiter-website-ecs/backend.tf:
--------------------------------------------------------------------------------
# Store the Terraform state file in S3 (bucket must exist with versioning
# enabled before `terraform init` — see README step 11).
terraform {
  backend "s3" {
    # REQUIRED: replace with the globally unique name of your state bucket.
    # The original file left this value empty, which is a syntax error and
    # makes `terraform init` fail.
    bucket  = "REPLACE-WITH-YOUR-STATE-BUCKET-NAME"
    key     = "jupiter-website-ecs.tfstate"
    region  = "us-east-1"
    profile = "default"
  }
}
--------------------------------------------------------------------------------
/jupiter-website-ecs/main.tf:
--------------------------------------------------------------------------------
# Configure the AWS provider; the region is supplied via variables.tf.
provider "aws" {
  region  = var.region
  profile = "default"
}

# Create the VPC: two public and two private app subnets across two AZs.
module "vpc" {
  source                      = "../modules/vpc"
  region                      = var.region
  project_name                = var.project_name
  vpc_cidr                    = var.vpc_cidr
  public_subnet_az1_cidr      = var.public_subnet_az1_cidr
  public_subnet_az2_cidr      = var.public_subnet_az2_cidr
  private_app_subnet_az1_cidr = var.private_app_subnet_az1_cidr
  private_app_subnet_az2_cidr = var.private_app_subnet_az2_cidr
}

# Create NAT gateways (one per AZ) and the private route tables that send
# outbound traffic from the private app subnets through them.
module "nat_gateway" {
  source                    = "../modules/nat-gateway"
  public_subnet_az1_id      = module.vpc.public_subnet_az1_id
  internet_gateway          = module.vpc.internet_gateway
  public_subnet_az2_id      = module.vpc.public_subnet_az2_id
  vpc_id                    = module.vpc.vpc_id
  private_app_subnet_az1_id = module.vpc.private_app_subnet_az1_id
  private_app_subnet_az2_id = module.vpc.private_app_subnet_az2_id
}

# Create the ALB and ECS security groups.
module "security_group" {
  source = "../modules/security-groups"
  vpc_id = module.vpc.vpc_id
}

# Create the IAM role that ECS tasks assume to pull images and write logs.
module "ecs_task_execution_role" {
  source = "../modules/ecs-tasks-execution-role"
  # Use var.project_name directly for consistency with the other modules
  # (previously read indirectly via module.vpc.project_name — same value).
  project_name = var.project_name
}

# Create the internet-facing Application Load Balancer in the public subnets.
module "application_load_balancer" {
  source                = "../modules/alb"
  project_name          = var.project_name
  alb_security_group_id = module.security_group.alb_security_group_id
  public_subnet_az1_id  = module.vpc.public_subnet_az1_id
  public_subnet_az2_id  = module.vpc.public_subnet_az2_id
  vpc_id                = module.vpc.vpc_id
}

# Create the ECS cluster, task definition and Fargate service in the
# private app subnets, registered with the ALB target group.
module "ecs" {
  source                       = "../modules/ecs"
  project_name                 = var.project_name
  ecs_tasks_execution_role_arn = module.ecs_task_execution_role.ecs_tasks_execution_role_arn
  container_image              = var.container_image
  region                       = var.region
  private_app_subnet_az1_id    = module.vpc.private_app_subnet_az1_id
  private_app_subnet_az2_id    = module.vpc.private_app_subnet_az2_id
  ecs_security_group_id        = module.security_group.ecs_security_group_id
  alb_target_group_arn         = module.application_load_balancer.alb_target_group_arn
}

# Register the ECS service with Application Auto Scaling.
module "auto_scaling_group" {
  source           = "../modules/asg"
  ecs_cluster_name = module.ecs.ecs_cluster_name
  ecs_service_name = module.ecs.ecs_service_name
}
--------------------------------------------------------------------------------
/jupiter-website-ecs/variables.tf:
--------------------------------------------------------------------------------
# Root-module inputs; assign values in a terraform.tfvars file (gitignored).
variable "region" {
  description = "AWS region where all resources are created (e.g. us-east-1)"
}
variable "project_name" {
  description = "Project name used as a prefix for resource names"
}
variable "vpc_cidr" {
  description = "CIDR block for the VPC"
}
variable "public_subnet_az1_cidr" {
  description = "CIDR block for the public subnet in availability zone 1"
}
variable "public_subnet_az2_cidr" {
  description = "CIDR block for the public subnet in availability zone 2"
}
variable "private_app_subnet_az1_cidr" {
  description = "CIDR block for the private app subnet in availability zone 1"
}
variable "private_app_subnet_az2_cidr" {
  description = "CIDR block for the private app subnet in availability zone 2"
}
variable "container_image" {
  description = "URI of the ECR image that the ECS task runs"
}
--------------------------------------------------------------------------------
/modules/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoyGeagea/aws-ecs-static-website-terraform/94576a1c90c918a0ac4eddcb814ce854b3b1775d/modules/.DS_Store
--------------------------------------------------------------------------------
/modules/alb/main.tf:
--------------------------------------------------------------------------------
# Create the internet-facing application load balancer in the public subnets.
resource "aws_lb" "application_load_balancer" {
  name                       = "${var.project_name}-alb"
  internal                   = false
  load_balancer_type         = "application"
  security_groups            = [var.alb_security_group_id]
  subnets                    = [var.public_subnet_az1_id, var.public_subnet_az2_id]
  enable_deletion_protection = false

  tags = {
    Name = "${var.project_name}-alb"
  }
}

# Create the target group for the ECS tasks (target_type "ip" is required
# for Fargate tasks using awsvpc networking).
resource "aws_lb_target_group" "alb_target_group" {
  name        = "${var.project_name}-tg"
  target_type = "ip"
  port        = 80
  protocol    = "HTTP"
  vpc_id      = var.vpc_id

  # Previous settings (interval=300, thresholds=5) meant a new target needed
  # ~25 minutes of passing checks before receiving traffic. These values let
  # targets become healthy in about a minute; timeout must stay < interval.
  health_check {
    enabled             = true
    interval            = 30
    path                = "/"
    timeout             = 5
    matcher             = "200"
    healthy_threshold   = 3
    unhealthy_threshold = 3
  }

  lifecycle {
    create_before_destroy = true
  }
}

# Create a listener on port 80 with a forward action to the target group.
# NOTE(review): despite its resource name, this listener serves plain HTTP on
# port 80; the name is kept to avoid destroying/recreating the listener.
resource "aws_lb_listener" "alb_https_listener" {
  load_balancer_arn = aws_lb.application_load_balancer.arn
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.alb_target_group.arn
  }
}
--------------------------------------------------------------------------------
/modules/alb/outputs.tf:
--------------------------------------------------------------------------------
output "alb_target_group_arn" {
  description = "ARN of the ALB target group the ECS service registers with"
  value       = aws_lb_target_group.alb_target_group.arn
}

output "alb_dns_name" {
  description = "Public DNS name of the application load balancer"
  value       = aws_lb.application_load_balancer.dns_name
}

output "alb_zone_id" {
  description = "Hosted zone ID of the ALB (for Route 53 alias records)"
  value       = aws_lb.application_load_balancer.zone_id
}
--------------------------------------------------------------------------------
/modules/alb/variables.tf:
--------------------------------------------------------------------------------
variable "project_name" {
  description = "Project name used as a prefix for resource names"
}
variable "alb_security_group_id" {
  description = "ID of the security group attached to the ALB"
}
variable "public_subnet_az1_id" {
  description = "ID of the public subnet in availability zone 1"
}
variable "public_subnet_az2_id" {
  description = "ID of the public subnet in availability zone 2"
}
variable "vpc_id" {
  description = "ID of the VPC the target group is created in"
}
--------------------------------------------------------------------------------
/modules/asg/main.tf:
--------------------------------------------------------------------------------
# Register the ECS service with Application Auto Scaling so its desired
# count can scale between 1 and 4 tasks.
resource "aws_appautoscaling_target" "ecs_target" {
  max_capacity       = 4
  min_capacity       = 1
  resource_id        = "service/${var.ecs_cluster_name}/${var.ecs_service_name}"
  scalable_dimension = "ecs:service:DesiredCount"
  service_namespace  = "ecs"
}

# A scalable target alone never scales anything — attach a target-tracking
# policy so the service actually scales out/in around 70% average CPU.
resource "aws_appautoscaling_policy" "ecs_cpu_policy" {
  name               = "${var.ecs_service_name}-cpu-target-tracking"
  policy_type        = "TargetTrackingScaling"
  resource_id        = aws_appautoscaling_target.ecs_target.resource_id
  scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
  service_namespace  = aws_appautoscaling_target.ecs_target.service_namespace

  target_tracking_scaling_policy_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ECSServiceAverageCPUUtilization"
    }
    target_value = 70
  }
}
--------------------------------------------------------------------------------
/modules/asg/variables.tf:
--------------------------------------------------------------------------------
variable "ecs_cluster_name" {
  description = "Name of the ECS cluster containing the service to scale"
}
variable "ecs_service_name" {
  description = "Name of the ECS service to register for auto scaling"
}
3 |
--------------------------------------------------------------------------------
/modules/ecs-tasks-execution-role/main.tf:
--------------------------------------------------------------------------------
# Generate the trust policy (in JSON) that lets the ECS Tasks service
# (ecs-tasks.amazonaws.com) assume this role via sts:AssumeRole.
data "aws_iam_policy_document" "ecs_tasks_execution_role_policy" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ecs-tasks.amazonaws.com"]
    }
  }
}

# Create the task execution IAM role with the trust policy above.
resource "aws_iam_role" "ecs_tasks_execution_role" {
  name               = "${var.project_name}-ecs-task-execution-role"
  assume_role_policy = data.aws_iam_policy_document.ecs_tasks_execution_role_policy.json
}

# Attach the AWS-managed execution policy, which grants permission to pull
# images from ECR and write container logs to CloudWatch.
resource "aws_iam_role_policy_attachment" "ecs_tasks_execution_role" {
  role       = aws_iam_role.ecs_tasks_execution_role.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
}
--------------------------------------------------------------------------------
/modules/ecs-tasks-execution-role/outputs.tf:
--------------------------------------------------------------------------------
output "ecs_tasks_execution_role_arn" {
  description = "ARN of the ECS task execution role, consumed by the ecs module"
  value       = aws_iam_role.ecs_tasks_execution_role.arn
}
--------------------------------------------------------------------------------
/modules/ecs-tasks-execution-role/variables.tf:
--------------------------------------------------------------------------------
variable "project_name" {
  description = "Project name used as a prefix for the IAM role name"
}
2 |
--------------------------------------------------------------------------------
/modules/ecs/main.tf:
--------------------------------------------------------------------------------
# Create the ECS cluster (Container Insights disabled to avoid the extra
# CloudWatch cost).
resource "aws_ecs_cluster" "ecs_cluster" {
  name = "${var.project_name}-cluster"

  setting {
    name  = "containerInsights"
    value = "disabled"
  }
}

# Create the CloudWatch log group that the container's awslogs driver
# writes to (referenced below in the task definition).
resource "aws_cloudwatch_log_group" "log_group" {
  name = "/ecs/${var.project_name}-task-definition"

  lifecycle {
    create_before_destroy = true
  }
}

# Create the Fargate task definition for the website container.
# NOTE(review): 2 vCPU / 4 GB is generous for a static site — consider a
# smaller Fargate size to reduce cost; confirm against actual usage.
resource "aws_ecs_task_definition" "ecs_task_definition" {
  family                   = "${var.project_name}-task-definition"
  execution_role_arn       = var.ecs_tasks_execution_role_arn
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  cpu                      = 2048
  memory                   = 4096

  runtime_platform {
    operating_system_family = "LINUX"
    cpu_architecture        = "X86_64"
  }

  # Single container serving HTTP on port 80; logs go to the group above.
  container_definitions = jsonencode([
    {
      name      = "${var.project_name}-container"
      image     = var.container_image
      essential = true

      portMappings = [
        {
          containerPort = 80
          hostPort      = 80
        }
      ]

      ulimits = [
        {
          name      = "nofile",
          softLimit = 1024000,
          hardLimit = 1024000
        }
      ]

      logConfiguration = {
        logDriver = "awslogs",
        options = {
          "awslogs-group"         = aws_cloudwatch_log_group.log_group.name,
          "awslogs-region"        = var.region,
          "awslogs-stream-prefix" = "ecs"
        }
      }
    }
  ])
}

# Create the ECS service: one Fargate task in the private app subnets,
# registered with the ALB target group on port 80.
resource "aws_ecs_service" "ecs_service" {
  name             = "${var.project_name}-service"
  launch_type      = "FARGATE"
  cluster          = aws_ecs_cluster.ecs_cluster.id
  task_definition  = aws_ecs_task_definition.ecs_task_definition.arn
  platform_version = "LATEST"
  desired_count    = 1
  # Keep all running tasks during a deploy and allow doubling while the new
  # task starts (zero-downtime rolling update).
  deployment_minimum_healthy_percent = 100
  deployment_maximum_percent         = 200

  # task tagging configuration
  enable_ecs_managed_tags = false
  propagate_tags          = "SERVICE"

  # Tasks get no public IP; outbound traffic goes via the NAT gateways.
  network_configuration {
    subnets          = [var.private_app_subnet_az1_id, var.private_app_subnet_az2_id]
    security_groups  = [var.ecs_security_group_id]
    assign_public_ip = false
  }

  # load balancing
  load_balancer {
    target_group_arn = var.alb_target_group_arn
    container_name   = "${var.project_name}-container"
    container_port   = 80
  }
}
--------------------------------------------------------------------------------
/modules/ecs/outputs.tf:
--------------------------------------------------------------------------------
output "ecs_cluster_name" {
  description = "Name of the ECS cluster, consumed by the asg module"
  value       = aws_ecs_cluster.ecs_cluster.name
}

output "ecs_service_name" {
  description = "Name of the ECS service, consumed by the asg module"
  value       = aws_ecs_service.ecs_service.name
}
--------------------------------------------------------------------------------
/modules/ecs/variables.tf:
--------------------------------------------------------------------------------
variable "project_name" {
  description = "Project name used as a prefix for resource names"
}
variable "ecs_tasks_execution_role_arn" {
  description = "ARN of the IAM role the ECS tasks use to pull images and write logs"
}
variable "container_image" {
  description = "URI of the ECR image that the task runs"
}
variable "region" {
  description = "AWS region, used for the awslogs log driver configuration"
}
variable "private_app_subnet_az1_id" {
  description = "ID of the private app subnet in availability zone 1"
}
variable "private_app_subnet_az2_id" {
  description = "ID of the private app subnet in availability zone 2"
}
variable "ecs_security_group_id" {
  description = "ID of the security group attached to the ECS tasks"
}
variable "alb_target_group_arn" {
  description = "ARN of the ALB target group the service registers tasks with"
}
9 |
--------------------------------------------------------------------------------
/modules/nat-gateway/main.tf:
--------------------------------------------------------------------------------
# Allocate an Elastic IP for the NAT gateway in public subnet AZ1.
resource "aws_eip" "eip_for_nat_gateway_az1" {
  # `vpc = true` was deprecated and then removed in AWS provider v5.x
  # (the lock file pins 5.4.0); `domain = "vpc"` is the replacement.
  domain = "vpc"

  tags = {
    Name = "nat gateway az1 eip"
  }
}

# Allocate an Elastic IP for the NAT gateway in public subnet AZ2.
resource "aws_eip" "eip_for_nat_gateway_az2" {
  domain = "vpc"

  tags = {
    Name = "nat gateway az2 eip"
  }
}

# Create the NAT gateway in public subnet AZ1.
resource "aws_nat_gateway" "nat_gateway_az1" {
  allocation_id = aws_eip.eip_for_nat_gateway_az1.id
  subnet_id     = var.public_subnet_az1_id

  tags = {
    Name = "nat gateway az1"
  }

  # To ensure proper ordering, add an explicit dependency on the VPC's
  # internet gateway (passed in as a whole resource from the vpc module).
  depends_on = [var.internet_gateway]
}

# Create the NAT gateway in public subnet AZ2.
resource "aws_nat_gateway" "nat_gateway_az2" {
  allocation_id = aws_eip.eip_for_nat_gateway_az2.id
  subnet_id     = var.public_subnet_az2_id

  tags = {
    Name = "nat gateway az2"
  }

  # Same explicit ordering dependency as above.
  depends_on = [var.internet_gateway]
}

# Private route table for AZ1: default route goes through NAT gateway AZ1.
resource "aws_route_table" "private_route_table_az1" {
  vpc_id = var.vpc_id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.nat_gateway_az1.id
  }

  tags = {
    Name = "private route table az1"
  }
}

# Associate private app subnet AZ1 with its route table.
resource "aws_route_table_association" "private_app_subnet_az1_route_table_az1_association" {
  subnet_id      = var.private_app_subnet_az1_id
  route_table_id = aws_route_table.private_route_table_az1.id
}

# Private route table for AZ2: default route goes through NAT gateway AZ2.
resource "aws_route_table" "private_route_table_az2" {
  vpc_id = var.vpc_id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.nat_gateway_az2.id
  }

  tags = {
    Name = "private route table az2"
  }
}

# Associate private app subnet AZ2 with its route table.
resource "aws_route_table_association" "private_app_subnet_az2_route_table_az2_association" {
  subnet_id      = var.private_app_subnet_az2_id
  route_table_id = aws_route_table.private_route_table_az2.id
}
--------------------------------------------------------------------------------
/modules/nat-gateway/variables.tf:
--------------------------------------------------------------------------------
variable "public_subnet_az1_id" {
  description = "ID of the public subnet in AZ1 hosting NAT gateway AZ1"
}
variable "internet_gateway" {
  description = "The internet gateway resource, used only as an explicit dependency"
}
variable "public_subnet_az2_id" {
  description = "ID of the public subnet in AZ2 hosting NAT gateway AZ2"
}
variable "vpc_id" {
  description = "ID of the VPC the private route tables are created in"
}
variable "private_app_subnet_az1_id" {
  description = "ID of the private app subnet in AZ1 to route through NAT AZ1"
}
variable "private_app_subnet_az2_id" {
  description = "ID of the private app subnet in AZ2 to route through NAT AZ2"
}
7 |
--------------------------------------------------------------------------------
/modules/security-groups/main.tf:
--------------------------------------------------------------------------------
# Security group for the application load balancer: allow HTTP/HTTPS from
# anywhere on the internet.
resource "aws_security_group" "alb_security_group" {
  name        = "alb security group"
  description = "enable http/https access on port 80/443"
  vpc_id      = var.vpc_id

  ingress {
    description = "http access"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "https access"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic ("-1" = all protocols; quoted string form
  # per the provider documentation).
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "alb security group"
  }
}

# Security group for the ECS tasks: only accept traffic originating from
# the ALB's security group.
resource "aws_security_group" "ecs_security_group" {
  name        = "ecs security group"
  description = "enable http/https access on port 80/443 via alb sg"
  vpc_id      = var.vpc_id

  ingress {
    description     = "http access"
    from_port       = 80
    to_port         = 80
    protocol        = "tcp"
    security_groups = [aws_security_group.alb_security_group.id]
  }

  # NOTE(review): the container only maps port 80, so this 443 rule appears
  # unused — confirm before removing.
  ingress {
    description     = "https access"
    from_port       = 443
    to_port         = 443
    protocol        = "tcp"
    security_groups = [aws_security_group.alb_security_group.id]
  }

  # Allow all outbound traffic (needed for ECR image pulls via the NATs).
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "ecs security group"
  }
}
--------------------------------------------------------------------------------
/modules/security-groups/outputs.tf:
--------------------------------------------------------------------------------
output "alb_security_group_id" {
  description = "ID of the ALB security group, consumed by the alb module"
  value       = aws_security_group.alb_security_group.id
}

output "ecs_security_group_id" {
  description = "ID of the ECS tasks security group, consumed by the ecs module"
  value       = aws_security_group.ecs_security_group.id
}
--------------------------------------------------------------------------------
/modules/security-groups/variables.tf:
--------------------------------------------------------------------------------
variable "vpc_id" {
  description = "ID of the VPC the security groups are created in"
}
2 |
--------------------------------------------------------------------------------
/modules/vpc/main.tf:
--------------------------------------------------------------------------------
# Create the VPC with DNS hostnames enabled (required so resources in it
# get DNS names, e.g. for interface endpoints and the ALB).
resource "aws_vpc" "vpc" {
  cidr_block           = var.vpc_cidr
  instance_tenancy     = "default"
  enable_dns_hostnames = true

  tags = {
    Name = "${var.project_name}-vpc"
  }
}

# Create the internet gateway and attach it to the VPC.
resource "aws_internet_gateway" "internet_gateway" {
  vpc_id = aws_vpc.vpc.id

  tags = {
    Name = "${var.project_name}-igw"
  }
}

# Look up the availability zones in the current region. NOTE: the subnets
# below index names[0] and names[1], so the region must have at least 2 AZs.
data "aws_availability_zones" "available_zones" {}

# Public subnet in AZ1 — instances launched here get public IPs.
resource "aws_subnet" "public_subnet_az1" {
  vpc_id                  = aws_vpc.vpc.id
  cidr_block              = var.public_subnet_az1_cidr
  availability_zone       = data.aws_availability_zones.available_zones.names[0]
  map_public_ip_on_launch = true

  tags = {
    Name = "public subnet az1"
  }
}

# Public subnet in AZ2.
resource "aws_subnet" "public_subnet_az2" {
  vpc_id                  = aws_vpc.vpc.id
  cidr_block              = var.public_subnet_az2_cidr
  availability_zone       = data.aws_availability_zones.available_zones.names[1]
  map_public_ip_on_launch = true

  tags = {
    Name = "public subnet az2"
  }
}

# Public route table: default route to the internet via the IGW.
resource "aws_route_table" "public_route_table" {
  vpc_id = aws_vpc.vpc.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.internet_gateway.id
  }

  tags = {
    Name = "public route table"
  }
}

# Associate public subnet AZ1 with the public route table.
resource "aws_route_table_association" "public_subnet_az1_route_table_association" {
  subnet_id      = aws_subnet.public_subnet_az1.id
  route_table_id = aws_route_table.public_route_table.id
}

# Associate public subnet AZ2 with the public route table.
resource "aws_route_table_association" "public_subnet_az2_route_table_association" {
  subnet_id      = aws_subnet.public_subnet_az2.id
  route_table_id = aws_route_table.public_route_table.id
}

# Private app subnet in AZ1 — no public IPs; egress is provided by the
# nat-gateway module's route tables.
resource "aws_subnet" "private_app_subnet_az1" {
  vpc_id                  = aws_vpc.vpc.id
  cidr_block              = var.private_app_subnet_az1_cidr
  availability_zone       = data.aws_availability_zones.available_zones.names[0]
  map_public_ip_on_launch = false

  tags = {
    Name = "private app subnet az1"
  }
}

# Private app subnet in AZ2.
resource "aws_subnet" "private_app_subnet_az2" {
  vpc_id                  = aws_vpc.vpc.id
  cidr_block              = var.private_app_subnet_az2_cidr
  availability_zone       = data.aws_availability_zones.available_zones.names[1]
  map_public_ip_on_launch = false

  tags = {
    Name = "private app subnet az2"
  }
}
--------------------------------------------------------------------------------
/modules/vpc/outputs.tf:
--------------------------------------------------------------------------------
output "region" {
  description = "AWS region passed through for downstream modules"
  value       = var.region
}

output "project_name" {
  description = "Project name passed through for downstream modules"
  value       = var.project_name
}

output "vpc_id" {
  description = "ID of the created VPC"
  value       = aws_vpc.vpc.id
}

output "public_subnet_az1_id" {
  description = "ID of the public subnet in availability zone 1"
  value       = aws_subnet.public_subnet_az1.id
}

output "public_subnet_az2_id" {
  description = "ID of the public subnet in availability zone 2"
  value       = aws_subnet.public_subnet_az2.id
}

output "private_app_subnet_az1_id" {
  description = "ID of the private app subnet in availability zone 1"
  value       = aws_subnet.private_app_subnet_az1.id
}

output "private_app_subnet_az2_id" {
  description = "ID of the private app subnet in availability zone 2"
  value       = aws_subnet.private_app_subnet_az2.id
}

output "internet_gateway" {
  description = "The internet gateway resource (used as a depends_on target by the nat-gateway module)"
  value       = aws_internet_gateway.internet_gateway
}
--------------------------------------------------------------------------------
/modules/vpc/variables.tf:
--------------------------------------------------------------------------------
variable "region" {
  description = "AWS region (re-exported via outputs for downstream modules)"
}
variable "project_name" {
  description = "Project name used as a prefix for resource names"
}
variable "vpc_cidr" {
  description = "CIDR block for the VPC"
}
variable "public_subnet_az1_cidr" {
  description = "CIDR block for the public subnet in availability zone 1"
}
variable "public_subnet_az2_cidr" {
  description = "CIDR block for the public subnet in availability zone 2"
}
variable "private_app_subnet_az1_cidr" {
  description = "CIDR block for the private app subnet in availability zone 1"
}
variable "private_app_subnet_az2_cidr" {
  description = "CIDR block for the private app subnet in availability zone 2"
}
--------------------------------------------------------------------------------