├── .github
│   ├── FUNDING.yml
│   └── workflows
│       ├── branch.yml
│       └── main.yml
├── README.md
├── acm.tf
├── alb.tf
├── asg.tf
├── aws_budget.tf
├── cloudfront.tf
├── cloudfront
│   └── index.js
├── cloudmap.tf
├── codedeploy.tf
├── data.tf
├── ec2_profile.tf
├── efs.tf
├── elasticache.tf
├── eventbridge.tf
├── lambda.tf
├── lambda
│   └── image_optimization
│       ├── index.mjs
│       └── package.json
├── opensearch.tf
├── production.auto.tfvars.template
├── provider.tf
├── rabbitmq.tf
├── random.tf
├── rds.tf
├── route53.tf
├── s3.tf
├── security_groups.tf
├── ses.tf
├── sns.tf
├── sqs.tf
├── ssm_cloudmap_deregister.tf
├── ssm_configuration.tf
├── ssm_cwa_config.tf
├── ssm_initialization.tf
├── ssm_parameterstore.tf
├── ssm_release.tf
├── ssm_role.tf
├── variables.tf
├── vpc_network.tf
├── waf.tf
└── waf
    ├── blacklist.csv.template
    └── whitelist.csv.template
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | custom: ["https://www.paypal.com/paypalme/magenx"]
2 |
--------------------------------------------------------------------------------
/.github/workflows/branch.yml:
--------------------------------------------------------------------------------
1 | name: Create Template Branches
2 |
3 | on:
4 | workflow_dispatch:
5 |
6 | jobs:
7 | create-orphan-branches:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Checkout repository
11 | uses: actions/checkout@v4
12 |
13 | - name: Set Git user identity
14 | run: |
15 | git config --global user.name "Magenx Cloud"
16 | git config --global user.email "admin@magenx.com"
17 |       - name: Create template branches
18 |         env:
19 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
20 |         run: |
21 |           git remote set-url origin "https://x-access-token:$GITHUB_TOKEN@github.com/magenx/Magento-2-aws-cluster-terraform.git"
22 |           branches=("module")
23 |           for branch in "${branches[@]}"; do
24 |             git checkout --orphan "${branch}"
25 |             git rm -rf . || true
26 |             git commit --allow-empty -m "${branch}"
27 |             git push origin "${branch}"
28 |           done
29 | 
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Replace vars
2 | on: workflow_dispatch
3 |
4 | jobs:
5 | replace-vars:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - name: Checkout code
9 | uses: actions/checkout@v4
10 | with:
11 | ref: ecs_v5
12 | - name: Replace var.name
13 | run: |
14 | find . -type f -exec sed -i 's/var\.app\[\("\)\([a-z_]*\)\("\)\]/var.\2/g' {} \;
15 | - name: Commit changes
16 | run: |
17 | git config --local user.name "magenx"
18 | git config --local user.email "admin@magenx.com"
19 | git add .
20 | git commit -m "Replaced var.name"
21 | git push origin ecs_v5
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Magento 2 [auto scaling](https://aws.amazon.com/autoscaling/) cluster with Terraform on AWS cloud only
2 | > Deploy a full-scale secure and flexible e-commerce infrastructure based on Magento 2 in a matter of seconds.
3 | > An enterprise-grade solution for companies of all sizes, B2B and B2C, providing the best customer experience.
4 | > Use [Fastly, Cloudflare, Section](../../tree/fastly_v5) in front.
5 |
6 |
7 | 
8 |
9 |
10 |
11 | ## AWS Graviton3 Processor - Enabling the best performance in EC2:
12 | 
13 |
14 | > [Amazon EC2 C7g instances upgrade](https://aws.amazon.com/ec2/instance-types/c7g/)
15 | > Best price performance for compute-intensive workloads in Amazon EC2
16 |
17 |
18 |
19 | ## ❓ Why you need an Adobe Commerce Cloud alternative:
20 | The biggest issue is that ACC pricing is based on a percentage of your GMV and AOV; you overpay by up to 80% while the bill between Adobe and AWS remains at a minimum. With this approach, you invest money in the development of a third-party business, not your own.
21 | Why spend so much money without having control over your business in the cloud?
22 | Configuring your own infrastructure these days is the most profitable way. You manage the resources, you have a complete overview of how everything works, and you have full control over the money invested in your own infrastructure. At any time you can make changes to both the infrastructure and the application design without thinking about restrictions, third-party platform limitations, or unforeseen costs. There are no hidden bills and no payments for excess resources that, in the end, you will not need.
23 |
24 | Adobe Commerce Cloud has lots of technical problems because many services compete on the same server and share processor time, memory, network, and I/O. It is a bad architectural solution built on monolithic servers rather than a cloud-native design: it was not made specifically for Magento but adapted in a rush using many wrappers, with manual pseudo-scaling and 48 hours to 5 days to accept and deploy new settings.
25 |
26 | ```
27 | Obviously, PaaS intermediaries also use AWS Cloud. But they conceal cheap solutions behind marketing,
28 | trying to hook you on a dodgy contract and make you pay 10 times more.
29 | ```
30 |
31 |
32 | ## AWS cloud account pros:
33 | - [x] Open source Magento
34 | - [x] Pay as You Go
35 | - [x] Transparent billing
36 | - [x] No draconian contracts
37 | - [x] No sudden overage charges
38 | - [x] No hardware restrictions
39 | - [x] No services limitations
40 | - [x] No hidden bottlenecks
41 | - [x] No time wasted on [support tickets](https://devdocs.magento.com/cloud/project/services.html)
42 |
43 |
44 |
45 |
46 | **Amazon Web Services** offers ecommerce cloud computing solutions to small and large businesses that want a flexible, secure, highly scalable infrastructure: all the technologies online retailers need to manage growth, instantly. With automatic scaling of compute resources, networking, storage, content distribution, and a PCI-compliant environment, retailers can always provide great customer experiences and capitalize on growth opportunities.
47 |
48 | **The biggest benefits of using your own AWS Cloud account**: [Reserved Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-reserved-instances.html)
49 | Reserved Instances provide you with significant savings on your Amazon EC2 costs compared to On-Demand Instance pricing. With Savings Plans, you make a commitment to a consistent usage amount, measured in USD per hour. This provides you with the flexibility to use the instance configurations that best meet your needs and continue to save money.
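For example, a Savings Plans commitment of $1 per hour corresponds to 24 × 365 = $8,760 of usage per year billed at the discounted rate; usage above the commitment is simply billed at regular On-Demand rates.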
50 |
51 |
52 |
53 | ## 📖 EC2 webstack custom configuration and Auto Scaling management
54 | [**Warm pools** for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html) - A warm pool gives you the ability to decrease latency for your applications. With warm pools, you no longer have to over-provision your Auto Scaling groups to manage latency in order to improve application performance. You have the option of keeping instances in the warm pool in one of two states: `Stopped` or `Running`. Keeping instances in a `Stopped` state is an effective way to minimize costs.
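As a sketch only (not this repository's actual `asg.tf`; the launch template reference and sizes are illustrative), a warm pool is attached to an Auto Scaling group like this:

```
resource "aws_autoscaling_group" "example" {
  name                = "example-asg"
  min_size            = 1
  max_size            = 4
  vpc_zone_identifier = values(aws_subnet.this)[*].id

  launch_template {
    id      = aws_launch_template.example.id   # hypothetical launch template
    version = "$Latest"
  }

  # Stopped warm instances are pre-initialized but billed only for their EBS volumes
  warm_pool {
    pool_state                  = "Stopped"
    min_size                    = 1
    max_group_prepared_capacity = 2
  }
}
```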
55 |
56 | NGINX is optimized and fully supported on the latest generation of 64-bit ARM servers. PHP is connected over a Unix socket.
57 |
58 | [**Debian 12** ARM 'bookworm'](https://aws.amazon.com/marketplace/pp/prodview-63gms6fbfaota), which will be supported for the next 5 years. Includes support for the very latest ARM-based server systems powered by certified 64-bit processors.
59 | Develop and deploy at scale. Webstack delivers top performance on ARM.
60 |
61 | [**AWS Systems Manager**](https://aws.amazon.com/systems-manager/) is an AWS service that you can use to view and control your infrastructure on AWS. Using the Systems Manager console, you can view operational data from multiple AWS EC2 instances and automate operational tasks across your AWS resources. Systems Manager helps you maintain security and compliance: no SSH connections from outside, no passwords or private keys to track. If you are familiar with shell scripting and yaml/json syntax, create SSM Documents; this is the easiest and most complete way to send instructions to an instance to perform common automated configuration tasks and even run scripts after the instance starts, from default stack optimization to changing any application or service settings.
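For illustration only (the repository's real documents live in the `ssm_*.tf` files; the name and command here are hypothetical), a minimal Command-type SSM document in Terraform looks like:

```
resource "aws_ssm_document" "nginx_reload" {
  name            = "nginx-reload"
  document_type   = "Command"
  document_format = "YAML"
  content         = <<EOT
schemaVersion: "2.2"
description: Reload nginx after a configuration change
mainSteps:
  - action: aws:runShellScript
    name: reloadNginx
    inputs:
      runCommand:
        - systemctl reload nginx
EOT
}
```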
62 |
63 |
64 |
65 | ## Developer documentation to read:
66 | ```
67 | https://devdocs.magento.com/
68 | https://docs.aws.amazon.com/index.html
69 | https://www.terraform.io/docs/
70 | https://aws.amazon.com/cloudshell/
71 | ```
72 |
73 |
74 | The Terraform configuration language and all the files in this repository are intuitively simple and straightforward: plain text and functions that any beginner can understand. Terraform deployment comes with zero dependencies; there are no prerequisites, no additional software to install, and no programming required.
75 |
76 | The idea was to create a full-fledged turnkey infrastructure, with deeper settings, so that any ecommerce manager could deploy it and immediately use it for their brand.
77 |
78 |
79 |
80 | # :rocket: Deployment into isolated VPC:
81 | - [x] Login to AWS Console
82 | - [x] [Subscribe to Debian 12 ARM](https://aws.amazon.com/marketplace/pp/prodview-63gms6fbfaota)
83 | - [x] Choose an AWS Region
84 | - [x] Start AWS CloudShell [fastest way to deploy and debug]
85 | - [x] Install Terraform:
86 | ```
87 | sudo yum install -y yum-utils
88 | sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
89 | sudo yum -y install packer terraform
90 | ```
91 | - [x] Create deployment directory:
92 | ```
93 | mkdir magento && cd magento
94 | ```
95 | - [x] Clone repo:
96 | >
97 | ```
98 | git clone https://github.com/magenx/Magento-2-aws-cluster-terraform.git .
99 | ```
100 | >
101 | ❗ Right after `terraform apply` you will receive an email from Amazon to approve resources
102 | - [x] Adjust your settings, edit your [cidr], [brand], [domain], [email] and other vars in `variables.tf`
103 | - [x] Define your source repository or use the default, and enable the minimal Magento 2 package to install.
104 | - [x] Define either [production] or [development] environment variable in `variables.tf`
105 |
106 | ❗ ```For production deployment make sure to enable deletion protection and backup retention```
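As a sketch, the production values could look like this (`brand`, `domain`, `admin_email`, and the `alb` keys are the variable names used in this repository's `alb.tf` and related files; the numbers are illustrative only):

```
# production.auto.tfvars - illustrative values only
brand       = "mybrand"
domain      = "example.com"
admin_email = "admin@example.com"

alb = {
  enable_deletion_protection = true   # keep enabled in production
  error_threshold            = 25
  rps_threshold              = 5000
}
```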
107 |
108 | - [x] Run:
109 | ```
110 | terraform fmt
111 | terraform init
112 | terraform workspace new production
113 | terraform plan -out production.plan.out -no-color > production.plan.out.txt 2>&1
114 | terraform apply
115 | ```
116 | > to destroy infrastructure: ```terraform destroy```
117 | > resources created outside of terraform must be deleted manually, for example CloudWatch logs
118 |
119 |
120 |
121 | ## Complete setup:
122 | `3` autoscaling groups with launch templates configuration from `user_data`
123 | `3` target groups for load balancer (varnish / frontend / admin)
124 | `2` load balancers (external/internal) with listeners / rules
125 | `1` rds mariadb database multi AZ
126 | `1` elasticsearch domain for Magento catalog search
127 | `2` redis elasticache clusters for sessions and cache
128 | `1` rabbitmq broker to manage queue messages
129 | `4` s3 buckets for [media] images and [system] files and logs (with access policy)
130 | `2` codecommit app files repository and services config files repository
131 | `1` cloudfront s3 origin distribution
132 | `1` efs file system for shared folders, with mount target per AZ
133 | `1` sns topic default subscription to receive email alerts
134 | `1` ses user access details for smtp module
135 |
136 | >resources are grouped into a virtual network, a VPC dedicated to your brand
137 | >the settings initially imply a large store and are designed for heavy traffic
138 | >services are clustered and replicated, thus ready for failover
139 |
140 | ##
141 | - [x] Deployment into isolated Virtual Private Cloud
142 | - [x] Autoscaling policy per each group
143 | - [x] Managed with [Systems Manager](https://aws.amazon.com/systems-manager/) agent
144 | - [x] Instance Profile assigned to simplify EC2 management
145 | - [x] Create and use ssm documents and EventBridge rules to automate tasks
146 | - [x] Simple Email Service authentication + SMTP Magento module
147 | - [x] CloudWatch agent configured to stream logs
148 | - [x] All Magento files managed with git only
149 | - [x] Configuration settings saved in Parameter Store
150 | - [x] Live shop in production mode / read-only
151 | - [x] Security groups configured for every service and instances
152 | - [x] phpMyAdmin for easy database editing
153 | - [x] Lambda database dump for data analysis
154 | - [x] [Lambda@Edge](https://aws.amazon.com/lambda/edge/#Real-time_Image_Transformation) images optimization
155 | - [x] Enhanced security in AWS and LEMP
156 | - [x] AWS Inspector Assessment templates
157 | - [x] AWS Config resource configuration rules
158 | - [x] AWS WAF Protection rules
159 |
160 | ##
161 | 
162 |
163 | ## 💰 Infracost - shows cloud infrastructure cost estimates:
164 | ```
165 | infracost breakdown --path .
166 | INFO Autodetected 1 Terraform project across 1 root module
167 | INFO Found Terraform project main at directory .
168 |
169 | Project: main
170 |
171 | OVERALL TOTAL **$981.87
172 |
173 | ──────────────────────────────────
174 | 294 cloud resources were detected:
175 | ∙ 50 were estimated
176 | ∙ 238 were free
177 | ∙ 6 are not supported
178 | ```
179 | > ** approximate price per month of this infrastructure.
180 |
181 |
182 |
183 | ## :hammer_and_wrench: Magento 2 development | source code:
184 | - [x] Define your source repository or use the default, and enable the minimal Magento 2 package to install.
185 | - [x] Check CodePipeline to install Magento 2 and pre-configure modules.
186 | - [x] EC2 instance user_data configured on boot to clone files from CodeCommit branch.
187 | > Replaced 200+ useless modules. The minimal Magento 2 package can be extended anytime.
188 | > Remove replaced components from the `"replace": {}` section of `composer.json` and run `composer update`
189 | > modules configuration here: https://github.com/magenx/Magento-2/blob/main/composer.json
190 |
191 |
192 | **Performance and security enhancements**
193 | - Faster backend and frontend, from 14% up to 50%
194 | - Better memory management, up to 15%
195 | - Easy deployments
196 | - Fewer dependencies
197 | - Zero maintenance
198 | - Low security risks
199 |
200 |
201 |
202 | ## CI/CD scenario:
203 | - [x] Event driven
204 | - [x] Services configuration files tracked in CodeCommit repository
205 | - [x] Changes in the CodeCommit repository trigger an EventBridge rule.
206 | - [x] An SSM Document pulls from the CodeCommit repository and cleans up.
207 | - [X] DevOps with local docker environment - developer and staging.
208 | - [x] GitHub Actions to build, release and sync with CodeCommit.
209 | - [x] Change deployment logic to your needs.
210 |
211 |
212 |
213 | ## AMI configuration and build using Packer:
214 | - [x] Build custom AMI with Packer configuration
215 | - [x] Reuse AMI in Terraform to create launch_template
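A minimal Packer HCL sketch of this flow, assuming the same Debian 12 ARM base image that `data.tf` selects (region, instance type, AMI name, and provisioner are illustrative):

```
source "amazon-ebs" "debian" {
  region        = "us-east-1"
  instance_type = "c7g.large"
  ssh_username  = "admin"
  ami_name      = "magento-webstack-{{timestamp}}"
  source_ami_filter {
    most_recent = true
    owners      = ["136693071363"] # debian
    filters = {
      name = "debian-12-arm64*"
    }
  }
}

build {
  sources = ["source.amazon-ebs.debian"]
  provisioner "shell" {
    inline = ["sudo apt-get update && sudo apt-get install -y nginx"]
  }
}
```

Terraform can then select the baked image with a `data "aws_ami"` filter on `magento-webstack-*` instead of the stock Debian image when creating the launch template.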
216 |
217 |
218 |
219 | ## :e-mail: Contact us for installation and support:
220 | We can launch this project for your store in a short time. Many big retailers have already migrated to this architecture.
221 | - [x] Write us an [email](mailto:info@magenx.com?subject=Magento%202%20auto%20scaling%20cluster%20on%20AWS) to discuss the project.
222 | - [x] Send a private message on [Linkedin](https://www.linkedin.com/in/magenx/)
223 |
224 |
225 |
226 | ## :heart_eyes_cat: Support the project
227 | This takes time and research. You can use it for free, but it was not free to create.
228 | If you are using this project, there are a few ways you can support it:
229 | - [x] Star and share the project
230 | - [x] Open an issue to help make it better
231 | - [x] Write a review https://trustpilot.com/review/www.magenx.com
232 |
233 | ❤️ Opensource
234 |
235 | [Magento 2 on the AWS Cloud: Quick Start Deployment](https://www.magenx.com/blog/post/adobe-commerce-cloud-alternative-infrastructure-as-code-terraform-configuration.html)
236 |
--------------------------------------------------------------------------------
/acm.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////////[ AWS CERTIFICATE MANAGER ]////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create ssl certificate for domain and subdomains
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_acm_certificate" "default" {
10 | domain_name = "${var.domain}"
11 | subject_alternative_names = ["*.${var.domain}"]
12 | validation_method = "DNS"
13 |
14 | lifecycle {
15 | create_before_destroy = true
16 | }
17 | tags = {
18 | Name = "${local.project}-${var.domain}-cert"
19 | }
20 | }
21 |
22 | resource "aws_acm_certificate" "cloudfront" {
23 | count = data.aws_region.current.name == "us-east-1" ? 0 : 1
24 | provider = aws.useast1
25 | domain_name = "${var.domain}"
26 | subject_alternative_names = ["*.${var.domain}"]
27 | validation_method = "DNS"
28 |
29 | lifecycle {
30 | create_before_destroy = true
31 | }
32 | tags = {
33 | Name = "${local.project}-${var.domain}-cert"
34 | }
35 | }
36 | # # ---------------------------------------------------------------------------------------------------------------------#
37 | # Validate ssl certificate for domain and subdomains
38 | # # ---------------------------------------------------------------------------------------------------------------------#
39 | resource "aws_acm_certificate_validation" "default" {
40 | certificate_arn = aws_acm_certificate.default.arn
41 | }
42 |
43 | resource "aws_acm_certificate_validation" "cloudfront" {
44 | count = data.aws_region.current.name == "us-east-1" ? 0 : 1
45 | provider = aws.useast1
46 | certificate_arn = aws_acm_certificate.cloudfront[0].arn
47 | }
48 |
--------------------------------------------------------------------------------
/alb.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////[ APPLICATION LOAD BALANCER ]////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create Application Load Balancers
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_lb" "this" {
10 | name = "${local.project}-alb"
11 | internal = false
12 | load_balancer_type = "application"
13 | enable_deletion_protection = var.alb["enable_deletion_protection"]
14 | drop_invalid_header_fields = true
15 | security_groups = [aws_security_group.alb.id]
16 | subnets = values(aws_subnet.this).*.id
17 | access_logs {
18 | bucket = aws_s3_bucket.this["system"].bucket
19 | prefix = "ALB"
20 | enabled = true
21 | }
22 | tags = {
23 | Name = "${local.project}-alb"
24 | }
25 | }
26 | # # ---------------------------------------------------------------------------------------------------------------------#
27 | # Create Target Groups for Load Balancers
28 | # # ---------------------------------------------------------------------------------------------------------------------#
29 | resource "aws_lb_target_group" "this" {
30 | name = "${local.project}-varnish"
31 | port = 80
32 | protocol = "HTTP"
33 | vpc_id = aws_vpc.this.id
34 | health_check {
35 | path = "/${random_string.this["health_check"].result}"
36 | interval = 30
37 | timeout = 5
38 | healthy_threshold = 3
39 | unhealthy_threshold = 2
40 | matcher = "200"
41 | }
42 | }
43 | # # ---------------------------------------------------------------------------------------------------------------------#
44 | # Create https:// listener for Load Balancer - default
45 | # # ---------------------------------------------------------------------------------------------------------------------#
46 | resource "aws_lb_listener" "https" {
47 | depends_on = [aws_acm_certificate_validation.default]
48 | load_balancer_arn = aws_lb.this.arn
49 | port = "443"
50 | protocol = "HTTPS"
51 | ssl_policy = "ELBSecurityPolicy-TLS13-1-2-Res-2021-06"
52 | certificate_arn = aws_acm_certificate.default.arn
53 | default_action {
54 | type = "fixed-response"
55 | fixed_response {
56 | content_type = "text/plain"
57 | message_body = "No targets are responding to this request"
58 | status_code = "418"
59 | }
60 | }
61 | }
62 | # # ---------------------------------------------------------------------------------------------------------------------#
63 | # Create http:// listener for Load Balancer - redirect to https://
64 | # # ---------------------------------------------------------------------------------------------------------------------#
65 | resource "aws_lb_listener" "http" {
66 | load_balancer_arn = aws_lb.this.arn
67 | port = "80"
68 | protocol = "HTTP"
69 | default_action {
70 | type = "redirect"
71 | redirect {
72 | port = "443"
73 | protocol = "HTTPS"
74 | status_code = "HTTP_301"
75 | }
76 | }
77 | }
78 | # # ---------------------------------------------------------------------------------------------------------------------#
79 | # Create conditional listener rule for Load Balancer - forward to varnish
80 | # # ---------------------------------------------------------------------------------------------------------------------#
81 | resource "aws_lb_listener_rule" "varnish" {
82 | listener_arn = aws_lb_listener.https.arn
83 | priority = 30
84 | action {
85 | type = "forward"
86 | target_group_arn = aws_lb_target_group.this.arn
87 | }
88 | condition {
89 | host_header {
90 | values = [var.domain]
91 | }
92 | }
93 | condition {
94 | http_header {
95 | http_header_name = "X-Magenx-Header"
96 | values = [random_uuid.this.result]
97 | }
98 | }
99 | }
100 | # # ---------------------------------------------------------------------------------------------------------------------#
101 | # Create CloudWatch HTTP 5XX metrics and email alerts
102 | # # ---------------------------------------------------------------------------------------------------------------------#
103 | resource "aws_cloudwatch_metric_alarm" "httpcode_target_5xx_count" {
104 | alarm_name = "${local.project}-http-5xx-errors-from-target"
105 | comparison_operator = "GreaterThanThreshold"
106 | evaluation_periods = "1"
107 | metric_name = "HTTPCode_Target_5XX_Count"
108 | namespace = "AWS/ApplicationELB"
109 | period = 300
110 | statistic = "Sum"
111 | threshold = var.alb["error_threshold"]
112 | alarm_description = "HTTPCode 5XX count for frontend instances over ${var.alb["error_threshold"]}"
113 | alarm_actions = ["${aws_sns_topic.default.arn}"]
114 | ok_actions = ["${aws_sns_topic.default.arn}"]
115 |
116 | dimensions = {
117 | TargetGroup = aws_lb_target_group.this.arn
118 | LoadBalancer = aws_lb.this.arn
119 | }
120 | }
121 | # # ---------------------------------------------------------------------------------------------------------------------#
122 | # Create CloudWatch HTTP 5XX metrics and email alerts
123 | # # ---------------------------------------------------------------------------------------------------------------------#
124 | resource "aws_cloudwatch_metric_alarm" "httpcode_elb_5xx_count" {
125 | alarm_name = "${local.project}-http-5xx-errors-from-loadbalancer"
126 | comparison_operator = "GreaterThanThreshold"
127 | evaluation_periods = "1"
128 | metric_name = "HTTPCode_ELB_5XX_Count"
129 | namespace = "AWS/ApplicationELB"
130 | period = 300
131 | statistic = "Sum"
132 | threshold = var.alb["error_threshold"]
133 | alarm_description = "HTTPCode 5XX count for loadbalancer over ${var.alb["error_threshold"]}"
134 | alarm_actions = ["${aws_sns_topic.default.arn}"]
135 | ok_actions = ["${aws_sns_topic.default.arn}"]
136 |
137 | dimensions = {
138 | LoadBalancer = aws_lb.this.arn
139 | }
140 | }
141 | # # ---------------------------------------------------------------------------------------------------------------------#
142 | # Create CloudWatch RequestCount metrics and email alerts
143 | # # ---------------------------------------------------------------------------------------------------------------------#
144 | resource "aws_cloudwatch_metric_alarm" "alb_rps" {
145 | alarm_name = "${local.project}-loadbalancer-rps"
146 | comparison_operator = "GreaterThanThreshold"
147 | evaluation_periods = "1"
148 | metric_name = "RequestCount"
149 | namespace = "AWS/ApplicationELB"
150 | period = "120"
151 | statistic = "Sum"
152 | threshold = var.alb["rps_threshold"]
153 | alarm_description = "The number of requests processed over 2 minutes greater than ${var.alb["rps_threshold"]}"
154 | alarm_actions = ["${aws_sns_topic.default.arn}"]
155 | ok_actions = ["${aws_sns_topic.default.arn}"]
156 |
157 | dimensions = {
158 | LoadBalancer = aws_lb.this.arn
159 | }
160 | }
161 |
162 |
163 |
--------------------------------------------------------------------------------
/asg.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////////[ AUTOSCALING CONFIGURATION ]////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create Launch Template for Autoscaling Groups
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_launch_template" "this" {
10 | for_each = var.ec2
11 | name = "${local.project}-${each.key}-ltpl"
12 | iam_instance_profile { name = aws_iam_instance_profile.ec2[each.key].name }
13 | image_id = data.aws_ami.distro.id
14 | instance_type = each.value.instance_type
15 | block_device_mappings {
16 | device_name = "/dev/xvda"
17 | ebs {
18 | volume_size = each.value.volume_size
19 | volume_type = "gp3"
20 | encrypted = true
21 | delete_on_termination = true
22 | }
23 | }
24 | monitoring { enabled = true }
25 | network_interfaces {
26 | associate_public_ip_address = true
27 | security_groups = [aws_security_group.ec2.id]
28 | }
29 | tag_specifications {
30 | resource_type = "instance"
31 | tags = {
32 | Name = "${local.project}-${each.key}-ec2"
33 | Instance_name = each.key
34 | Hostname = "${each.key}.${var.brand}.internal"
35 | }
36 | }
37 | tag_specifications {
38 | resource_type = "volume"
39 | tags = {
40 | Name = "${local.project}-${each.key}-volume"
41 | }
42 | }
 43 |   user_data = base64encode(<
--------------------------------------------------------------------------------
/cloudfront/index.js:
--------------------------------------------------------------------------------
12 | switch (operation.toLowerCase()) {
13 | case 'format':
14 | var SUPPORTED_FORMATS = ['auto', 'jpeg', 'webp', 'avif', 'png', 'svg', 'gif'];
15 | if (request.querystring[operation]['value'] && SUPPORTED_FORMATS.includes(request.querystring[operation]['value'].toLowerCase())) {
16 | var format = request.querystring[operation]['value'].toLowerCase(); // normalize to lowercase
17 | if (format === 'auto') {
18 | format = 'jpeg';
19 | if (request.headers['accept']) {
20 | if (request.headers['accept'].value.includes("avif")) {
21 | format = 'avif';
22 | } else if (request.headers['accept'].value.includes("webp")) {
23 | format = 'webp';
24 | }
25 | }
26 | }
27 | normalizedOperations['format'] = format;
28 | }
29 | break;
30 | case 'width':
31 | if (request.querystring[operation]['value']) {
32 | var width = parseInt(request.querystring[operation]['value']);
33 | if (!isNaN(width) && (width > 0)) {
34 | if (width > 500) width = 500;
35 | normalizedOperations['width'] = width.toString();
36 | }
37 | }
38 | break;
39 | case 'height':
40 | if (request.querystring[operation]['value']) {
41 | var height = parseInt(request.querystring[operation]['value']);
42 | if (!isNaN(height) && (height > 0)) {
43 | if (height > 500) height = 500;
44 | normalizedOperations['height'] = height.toString();
45 | }
46 | }
47 | break;
48 | case 'quality':
49 | if (request.querystring[operation]['value']) {
50 | var quality = parseInt(request.querystring[operation]['value']);
51 | if (!isNaN(quality) && (quality > 0)) {
52 | if (quality > 100) quality = 65;
53 | normalizedOperations['quality'] = quality.toString();
54 | }
55 | }
56 | break;
57 | default: break;
58 | }
59 | });
60 |
61 | // Rewrite the path to the normalized version if valid operations are found
62 | if (Object.keys(normalizedOperations).length > 0) {
63 | var normalizedOperationsArray = [];
64 | if (normalizedOperations.format) normalizedOperationsArray.push('format='+normalizedOperations.format);
65 | if (normalizedOperations.quality) normalizedOperationsArray.push('quality='+normalizedOperations.quality);
66 | if (normalizedOperations.width) normalizedOperationsArray.push('width='+normalizedOperations.width);
67 | if (normalizedOperations.height) normalizedOperationsArray.push('height='+normalizedOperations.height);
68 | request.uri = originalImagePath + '?' + normalizedOperationsArray.join('&');
69 | } else {
70 | // If no valid operation is found
71 | request.uri = originalImagePath + '?width=250&height=250&quality=65';
72 | }
73 |
74 | } else {
75 | // If no query strings are found
 76 |         request.uri = originalImagePath + '?width=350';
77 | }
78 |
79 | // Remove query strings
80 | request.querystring = {};
81 | return request;
82 | }
83 |
--------------------------------------------------------------------------------
/cloudmap.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////////////[ CLOUDMAP DISCOVERY ]/////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create CloudMap discovery service with private dns namespace
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_service_discovery_private_dns_namespace" "this" {
10 | name = "${var.brand}.internal"
11 | description = "Namespace for ${local.project}"
12 | vpc = aws_vpc.this.id
13 | tags = {
14 | Name = "${local.project}-namespace"
15 | }
16 | }
17 |
18 | resource "aws_service_discovery_service" "this" {
19 | for_each = {
20 | for entry in setproduct(keys(var.ec2), keys(data.aws_availability_zone.available)) :
21 | "${entry[0]}-${entry[1]}" => {
22 | service = entry[0],
23 | az = entry[1]
24 | }
25 | }
26 | name = "${local.project}-${each.value.service}-${each.value.az}"
27 | dns_config {
28 | namespace_id = aws_service_discovery_private_dns_namespace.this.id
29 | dns_records {
30 | type = "A"
31 | ttl = 10
32 | }
33 | }
34 | health_check_custom_config {
35 | failure_threshold = 1
36 | }
37 | force_destroy = true
38 | tags = {
39 | Name = "${local.project}-${each.value.service}-${each.value.az}-service"
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/codedeploy.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////////////[ CODEDEPLOY ]///////////////////////////////////////////////////
5 | # # ---------------------------------------------------------------------------------------------------------------------#
6 | # Create CodeDeploy role
7 | # # ---------------------------------------------------------------------------------------------------------------------#
8 | data "aws_iam_policy_document" "codedeploy_assume_role" {
9 | statement {
10 | effect = "Allow"
11 | actions = ["sts:AssumeRole"]
12 | principals {
13 | type = "Service"
14 | identifiers = ["codedeploy.amazonaws.com"]
15 | }
16 | }
17 | }
18 |
19 | resource "aws_iam_role" "codedeploy" {
20 | name = "${local.project}-codedeploy-role"
21 | description = "Allows CodeDeploy to call AWS services on your behalf."
22 | assume_role_policy = data.aws_iam_policy_document.codedeploy_assume_role.json
23 | tags = {
24 | Name = "${local.project}-codedeploy-role"
25 | }
26 | }
27 |
28 | resource "aws_iam_role_policy_attachment" "AWSCodeDeployRole" {
29 | policy_arn = "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole"
30 | role = aws_iam_role.codedeploy.name
31 | }
32 |
33 | data "aws_iam_policy_document" "codedeploy" {
34 | statement {
35 | sid = "AllowCodeDeployToASG"
36 | effect = "Allow"
37 | actions = [
38 | "autoscaling:CompleteLifecycleAction",
39 | "autoscaling:DescribeAutoScalingGroups",
40 | "autoscaling:PutLifecycleHook",
41 | "autoscaling:DeleteLifecycleHook",
42 | "autoscaling:RecordLifecycleActionHeartbeat"
43 | ]
44 | resources = ["*"]
45 | }
46 | }
47 |
48 | resource "aws_iam_role_policy" "codedeploy" {
49 | role = aws_iam_role.codedeploy.name
50 | policy = data.aws_iam_policy_document.codedeploy.json
51 | }
52 | # # ---------------------------------------------------------------------------------------------------------------------#
53 | # CodeDeploy Applications for frontend ASG
54 | # # ---------------------------------------------------------------------------------------------------------------------#
55 | resource "aws_codedeploy_app" "this" {
56 | for_each = { for instance, value in var.ec2 : instance => value if instance == "frontend" }
57 | name = "${local.project}-${each.key}-codedeploy-app"
58 | compute_platform = "Server"
59 | }
60 | # # ---------------------------------------------------------------------------------------------------------------------#
61 | # CodeDeploy Deployment Groups for ASGs
62 | # # ---------------------------------------------------------------------------------------------------------------------#
63 | resource "aws_codedeploy_deployment_group" "this" {
64 | for_each = { for instance, value in var.ec2 : instance => value if instance == "frontend" }
65 | deployment_group_name = "${local.project}-${each.key}-deployment-group"
66 | deployment_config_name = "CodeDeployDefault.AllAtOnce"
67 | app_name = aws_codedeploy_app.this[each.key].name
68 | service_role_arn = aws_iam_role.codedeploy.arn
69 | autoscaling_groups = [aws_autoscaling_group.this[each.key].name]
70 | trigger_configuration {
71 | trigger_events = ["DeploymentStart","DeploymentSuccess","DeploymentFailure"]
72 | trigger_name = "${local.project}-${each.key}-deployment-notification"
73 | trigger_target_arn = aws_sns_topic.default.arn
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/data.tf:
--------------------------------------------------------------------------------
1 | # # ---------------------------------------------------------------------------------------------------------------------#
2 | # Get the name of the region where the Terraform deployment is running
3 | # # ---------------------------------------------------------------------------------------------------------------------#
4 | data "aws_region" "current" {}
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Get the effective Account ID, User ID, and ARN in which Terraform is authorized.
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | data "aws_caller_identity" "current" {}
10 |
11 | # # ---------------------------------------------------------------------------------------------------------------------#
12 | # Get the Account ID of the AWS ELB Service Account for the purpose of permitting in S3 bucket policy.
13 | # # ---------------------------------------------------------------------------------------------------------------------#
14 | data "aws_elb_service_account" "current" {}
15 |
16 | # # ---------------------------------------------------------------------------------------------------------------------#
17 | # Get AWS Inspector rules available in this region
18 | # # ---------------------------------------------------------------------------------------------------------------------#
19 | data "aws_inspector_rules_packages" "available" {}
20 |
21 | # # ---------------------------------------------------------------------------------------------------------------------#
22 | # Get default tags aws provider
23 | # # ---------------------------------------------------------------------------------------------------------------------#
24 | data "aws_default_tags" "this" {}
25 |
26 | # # ---------------------------------------------------------------------------------------------------------------------#
27 | # Get the list of AWS Availability Zones available in this region
28 | # # ---------------------------------------------------------------------------------------------------------------------#
29 | data "aws_availability_zones" "available" {
30 | state = "available"
31 | exclude_zone_ids = ["use1-az3"]
32 | }
33 | data "aws_availability_zone" "available" {
34 | for_each = toset(data.aws_availability_zones.available.names)
35 | name = each.key
36 | }
37 | # # ---------------------------------------------------------------------------------------------------------------------#
38 | # Get the ID of default VPC
39 | # # ---------------------------------------------------------------------------------------------------------------------#
40 | data "aws_vpc" "default" {
41 | default = true
42 | }
43 | # # ---------------------------------------------------------------------------------------------------------------------#
44 | # Get default subnets from AZ in this region/vpc
45 | # # ---------------------------------------------------------------------------------------------------------------------#
46 | data "aws_subnets" "default" {
47 | filter {
48 | name = "vpc-id"
49 | values = [data.aws_vpc.default.id]
50 | }
51 |
52 | filter {
53 | name = "default-for-az"
54 | values = ["true"]
55 | }
56 | }
57 | # # ---------------------------------------------------------------------------------------------------------------------#
58 | # Get all available VPC in this region
59 | # # ---------------------------------------------------------------------------------------------------------------------#
60 | data "aws_vpcs" "available" {}
61 |
62 | data "aws_vpc" "all" {
63 | for_each = toset(data.aws_vpcs.available.ids)
64 | id = each.key
65 | }
66 | # # ---------------------------------------------------------------------------------------------------------------------#
67 | # Get the ID of default Security Group
68 | # # ---------------------------------------------------------------------------------------------------------------------#
69 | data "aws_security_group" "default" {
70 | vpc_id = data.aws_vpc.default.id
71 |
72 | filter {
73 | name = "group-name"
74 | values = ["default"]
75 | }
76 | }
77 | # # ---------------------------------------------------------------------------------------------------------------------#
78 | # Get the ID of CloudFront origin request policy
79 | # # ---------------------------------------------------------------------------------------------------------------------#
80 | data "aws_cloudfront_origin_request_policy" "media" {
81 | name = "Managed-CORS-S3Origin"
82 | }
83 | data "aws_cloudfront_origin_request_policy" "alb" {
84 | name = "Managed-CORS-CustomOrigin"
85 | }
86 | data "aws_cloudfront_origin_request_policy" "admin" {
87 | name = "Managed-AllViewer"
88 | }
89 | # # ---------------------------------------------------------------------------------------------------------------------#
90 | # Get the ID of CloudFront cache policy.
91 | # # ---------------------------------------------------------------------------------------------------------------------#
92 | data "aws_cloudfront_cache_policy" "alb" {
93 | name = "UseOriginCacheControlHeaders-QueryStrings"
94 | }
95 | data "aws_cloudfront_cache_policy" "admin" {
96 | name = "Managed-CachingDisabled"
97 | }
98 | # # ---------------------------------------------------------------------------------------------------------------------#
 99 | # Get the latest ID of a registered AMI linux distro by owner and version
100 | # # ---------------------------------------------------------------------------------------------------------------------#
101 | data "aws_ami" "distro" {
102 | most_recent = true
103 | owners = ["136693071363"] # debian
104 |
105 | filter {
106 | name = "name"
107 | values = ["debian-12-arm64*"] # debian
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/ec2_profile.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////////////////[ EC2 PROFILE ]////////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create EC2 service role
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | data "aws_iam_policy_document" "ec2_assume_role" {
10 | statement {
11 | actions = ["sts:AssumeRole"]
12 | principals {
13 | type = "Service"
14 | identifiers = ["ec2.amazonaws.com"]
15 | }
16 | effect = "Allow"
17 | sid = "EC2AssumeRole"
18 | }
19 | }
20 |
21 | resource "aws_iam_role" "ec2" {
22 | for_each = var.ec2
23 | name = "${local.project}-EC2InstanceRole-${each.key}"
24 | description = "Allows EC2 instances to call AWS services on your behalf"
25 | assume_role_policy = data.aws_iam_policy_document.ec2_assume_role.json
26 | }
27 | # # ---------------------------------------------------------------------------------------------------------------------#
28 | # Attach policies to EC2 service role
29 | # # ---------------------------------------------------------------------------------------------------------------------#
30 | resource "aws_iam_role_policy_attachment" "ec2" {
31 | for_each = {
32 | for entry in setproduct(keys(var.ec2), var.ec2_instance_profile_policy) :
33 | "${entry[0]}-${entry[1]}" => {
34 | role = entry[0],
35 | policy = entry[1]
36 | }
37 | }
38 | role = aws_iam_role.ec2[each.value.role].name
39 | policy_arn = each.value.policy
40 | }
41 | # # ---------------------------------------------------------------------------------------------------------------------#
42 | # Create inline policy for EC2 service role to publish sns message
43 | # # ---------------------------------------------------------------------------------------------------------------------#
44 | data "aws_iam_policy_document" "sns_publish" {
45 | for_each = var.ec2
46 | statement {
47 | sid = "EC2ProfileSNSPublishPolicy${each.key}"
48 | effect = "Allow"
49 | actions = [
50 | "sns:Publish"
51 | ]
52 | resources = [
53 | aws_sns_topic.default.arn
54 | ]
55 | }
56 | }
57 |
58 | resource "aws_iam_role_policy" "sns_publish" {
59 | for_each = var.ec2
60 | name = "EC2ProfileSNSPublishPolicy${title(each.key)}"
61 | role = aws_iam_role.ec2[each.key].id
62 | policy = data.aws_iam_policy_document.sns_publish[each.key].json
63 | }
64 | # # ---------------------------------------------------------------------------------------------------------------------#
65 | # Create inline policy for EC2 service role to send ses emails
66 | # # ---------------------------------------------------------------------------------------------------------------------#
67 | data "aws_iam_policy_document" "ses_send" {
68 | for_each = var.ec2
69 | statement {
70 | sid = "EC2ProfileSESSendPolicy${each.key}"
71 | effect = "Allow"
72 | actions = [
73 | "ses:SendEmail",
74 | "ses:SendRawEmail"
75 | ]
76 | resources = ["*"]
77 | condition {
78 | test = "StringEquals"
79 | variable = "ses:FromAddress"
80 | values = [var.admin_email]
81 | }
82 | }
83 | }
84 |
85 | resource "aws_iam_role_policy" "ses_send" {
86 | for_each = var.ec2
87 | name = "EC2ProfileSESSendPolicy${title(each.key)}"
88 | role = aws_iam_role.ec2[each.key].id
89 | policy = data.aws_iam_policy_document.ses_send[each.key].json
90 | }
91 | # # ---------------------------------------------------------------------------------------------------------------------#
92 | # Create EC2 Instance Profile
93 | # # ---------------------------------------------------------------------------------------------------------------------#
94 | resource "aws_iam_instance_profile" "ec2" {
95 | for_each = var.ec2
96 | name = "${local.project}-EC2InstanceProfile-${each.key}"
97 | role = aws_iam_role.ec2[each.key].name
98 | }
99 |
--------------------------------------------------------------------------------
/efs.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////////[ ELASTIC FILE SYSTEM ]////////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create EFS file system
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_efs_file_system" "this" {
10 | creation_token = "${local.project}-efs-storage"
11 | tags = {
12 | Name = "${local.project}-efs-storage"
13 | }
14 | }
15 | # # ---------------------------------------------------------------------------------------------------------------------#
16 | # Create EFS mount target for each subnet
17 | # # ---------------------------------------------------------------------------------------------------------------------#
18 | resource "aws_efs_mount_target" "this" {
19 | for_each = aws_subnet.this
20 | file_system_id = aws_efs_file_system.this.id
21 | subnet_id = aws_subnet.this[each.key].id
22 | security_groups = [aws_security_group.efs.id]
23 | }
24 | # # ---------------------------------------------------------------------------------------------------------------------#
25 | # Create EFS access point for each path
26 | # # ---------------------------------------------------------------------------------------------------------------------#
27 | resource "aws_efs_access_point" "this" {
28 | for_each = var.efs
29 | file_system_id = aws_efs_file_system.this.id
30 | posix_user {
31 | uid = each.value.uid
32 | gid = each.value.gid
33 | }
34 | root_directory {
35 | path = "/${each.key}"
36 | creation_info {
37 | owner_uid = each.value.uid
38 | owner_gid = each.value.gid
39 | permissions = each.value.permissions
40 | }
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/elasticache.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | //////////////////////////////////////////////////////////[ ELASTICACHE ]/////////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create ElastiCache subnet group in our dedicated VPC
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_elasticache_subnet_group" "this" {
10 | description = "ElastiCache Subnet for ${replace(local.project,"-"," ")}"
11 | name = "${local.project}-elasticache-subnet"
12 | subnet_ids = values(aws_subnet.this).*.id
13 | tags = {
14 | Name = "${local.project}-elasticache-subnet"
15 | }
16 | }
17 | # # ---------------------------------------------------------------------------------------------------------------------#
18 | # Create ElastiCache parameter groups
19 | # # ---------------------------------------------------------------------------------------------------------------------#
20 | resource "aws_elasticache_parameter_group" "this" {
21 | for_each = toset(var.redis["name"])
22 | name = "${local.project}-${each.key}-parameter"
23 | family = var.redis["family"]
24 | description = "Parameter group for ${var.domain} ${each.key} backend"
25 | dynamic "parameter" {
26 | for_each = var.redis_parameters
27 | content {
28 | name = parameter.value["name"]
29 | value = parameter.value["value"]
30 | }
31 | }
32 | tags = {
33 | Name = "${local.project}-${each.key}-parameter"
34 | }
35 | }
36 | # # ---------------------------------------------------------------------------------------------------------------------#
37 | # Create ElastiCache - Redis Replication group - session + cache
38 | # # ---------------------------------------------------------------------------------------------------------------------#
39 | resource "aws_elasticache_replication_group" "this" {
40 | for_each = toset(var.redis["name"])
41 | description = "Replication group for ${var.domain} ${each.key} backend"
42 | num_cache_clusters = var.redis["num_cache_clusters"]
43 | at_rest_encryption_enabled = var.redis["at_rest_encryption_enabled"]
44 | engine = "redis"
45 | engine_version = var.redis["engine_version"]
46 | replication_group_id = "${local.project}-${each.key}-backend"
47 | node_type = var.redis["node_type"]
48 | port = var.redis["port"]
49 | parameter_group_name = aws_elasticache_parameter_group.this[each.key].id
50 | security_group_ids = [aws_security_group.redis.id]
51 | subnet_group_name = aws_elasticache_subnet_group.this.name
52 | automatic_failover_enabled = var.redis["num_cache_clusters"] > 1 ? true : false
53 | multi_az_enabled = var.redis["num_cache_clusters"] > 1 ? true : false
54 | notification_topic_arn = aws_sns_topic.default.arn
55 | transit_encryption_enabled = true
56 | auth_token = random_password.this["redis"].result
57 | auth_token_update_strategy = "ROTATE"
58 | lifecycle {
59 | ignore_changes = [num_cache_clusters]
60 | }
61 | tags = {
62 | Name = "${local.project}-${each.key}-backend"
63 | }
64 | }
65 | # # ---------------------------------------------------------------------------------------------------------------------#
66 | # Create CloudWatch CPU Utilization metrics and email alerts
67 | # # ---------------------------------------------------------------------------------------------------------------------#
68 | resource "aws_cloudwatch_metric_alarm" "elasticache_cpu" {
69 | for_each = aws_elasticache_replication_group.this
70 | alarm_name = "${local.project}-elasticache-${each.key}-cpu-utilization"
71 | alarm_description = "Redis cluster CPU utilization"
72 | comparison_operator = "GreaterThanThreshold"
73 | evaluation_periods = "1"
74 | metric_name = "CPUUtilization"
75 | namespace = "AWS/ElastiCache"
76 | period = "300"
77 | statistic = "Average"
78 | threshold = 80
79 | alarm_actions = ["${aws_sns_topic.default.arn}"]
80 | ok_actions = ["${aws_sns_topic.default.arn}"]
81 |
82 | dimensions = {
83 | CacheClusterId = aws_elasticache_replication_group.this[each.key].id
84 | }
85 | }
86 | # # ---------------------------------------------------------------------------------------------------------------------#
87 | # Create CloudWatch Freeable Memory metrics and email alerts
88 | # # ---------------------------------------------------------------------------------------------------------------------#
89 | resource "aws_cloudwatch_metric_alarm" "elasticache_memory" {
90 | for_each = aws_elasticache_replication_group.this
91 | alarm_name = "${local.project}-elasticache-${each.key}-freeable-memory"
92 | alarm_description = "Redis cluster freeable memory"
93 | comparison_operator = "LessThanThreshold"
94 | evaluation_periods = "1"
95 | metric_name = "FreeableMemory"
96 | namespace = "AWS/ElastiCache"
97 | period = "60"
98 | statistic = "Average"
99 | threshold = 10000000
100 | alarm_actions = ["${aws_sns_topic.default.arn}"]
101 | ok_actions = ["${aws_sns_topic.default.arn}"]
102 |
103 | dimensions = {
104 | CacheClusterId = aws_elasticache_replication_group.this[each.key].id
105 | }
106 | }
107 |
108 |
109 |
--------------------------------------------------------------------------------
/eventbridge.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ////////////////////////////////////////////////////////[ EVENTBRIDGE RULES ]/////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create EventBridge service role
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | data "aws_iam_policy_document" "eventbridge_assume_role" {
10 | statement {
11 | effect = "Allow"
12 | actions = ["sts:AssumeRole"]
13 | principals {
14 | type = "Service"
15 | identifiers = ["events.amazonaws.com"]
16 | }
17 | }
18 | }
19 | resource "aws_iam_role" "eventbridge_service_role" {
20 | name = "${local.project}-EventBridgeServiceRole"
 21 |   description        = "Allows EventBridge to manage events on your behalf."
22 | assume_role_policy = data.aws_iam_policy_document.eventbridge_assume_role.json
23 | }
24 | data "aws_iam_policy_document" "eventbridge_policy" {
25 | statement {
26 | effect = "Allow"
27 | actions = [
28 | "ssm:StartAutomationExecution",
29 | "sqs:SendMessage"
30 | ]
31 | resources = ["*"]
32 | }
33 | }
34 | resource "aws_iam_policy" "eventbridge_policy" {
35 | name = "${local.project}-EventBridgePolicy"
36 | policy = data.aws_iam_policy_document.eventbridge_policy.json
37 | }
38 | resource "aws_iam_role_policy_attachment" "eventbridge_policy_attach" {
39 | role = aws_iam_role.eventbridge_service_role.name
40 | policy_arn = aws_iam_policy.eventbridge_policy.arn
41 | }
42 | # # ---------------------------------------------------------------------------------------------------------------------#
43 | # EventBridge Rule for S3 bucket object event for setup update
44 | # # ---------------------------------------------------------------------------------------------------------------------#
45 | resource "aws_cloudwatch_event_rule" "s3_setup_update" {
46 | name = "${local.project}-s3-setup-update"
47 | description = "Trigger SSM document when s3 system bucket setup updated"
48 | event_pattern = jsonencode({
49 | "source": ["aws.s3"],
50 | "detail-type" : ["Object Created"],
51 | "detail" : {
52 | "bucket" : { "name" : [aws_s3_bucket.this["system"].bucket] },
53 | "object" : { "key" : [{ "prefix" : "setup/" }] }
54 | }
55 | })
56 | }
57 | # # ---------------------------------------------------------------------------------------------------------------------#
58 | # EventBridge Rule Target for SSM Document configuration on S3 update
59 | # # ---------------------------------------------------------------------------------------------------------------------#
60 | resource "aws_cloudwatch_event_target" "s3_setup_update" {
61 | depends_on = [aws_autoscaling_group.this]
62 | rule = aws_cloudwatch_event_rule.s3_setup_update.name
63 | target_id = "${local.project}-s3-system-setup-update"
64 | arn = aws_ssm_document.configuration.arn
65 | role_arn = aws_iam_role.eventbridge_service_role.arn
66 | dead_letter_config {
67 | arn = aws_sqs_queue.dead_letter_queue.arn
68 | }
69 | input_transformer {
70 | input_paths = {
71 | S3ObjectKey = "$.detail.object.key"
 72 |       }
 73 |       input_template = <<END
 74 |       {
 75 |         "S3ObjectKey": ["<S3ObjectKey>"]
 76 |       }
 77 |       END
78 | }
79 | }
80 | # # ---------------------------------------------------------------------------------------------------------------------#
81 | # EventBridge Rule for S3 bucket object event for release update
82 | # # ---------------------------------------------------------------------------------------------------------------------#
83 | resource "aws_cloudwatch_event_rule" "s3_release_update" {
84 | name = "${local.project}-s3-release-update"
85 | description = "Trigger SSM document when s3 system bucket release updated"
86 | event_pattern = jsonencode({
87 | "source": ["aws.s3"],
88 | "detail-type" : ["Object Created"],
89 | "detail" : {
90 | "bucket" : { "name" : [aws_s3_bucket.this["system"].bucket] },
91 | "object" : { "key" : [{ "prefix" : "release/" }] }
92 | }
93 | })
94 | }
95 | # # ---------------------------------------------------------------------------------------------------------------------#
96 | # EventBridge Rule Target for SSM Document S3 release update
97 | # # ---------------------------------------------------------------------------------------------------------------------#
98 | resource "aws_cloudwatch_event_target" "s3_release_update" {
99 | depends_on = [aws_autoscaling_group.this]
100 | rule = aws_cloudwatch_event_rule.s3_release_update.name
101 | target_id = "${local.project}-s3-system-release-update"
102 | arn = aws_ssm_document.release.arn
103 | role_arn = aws_iam_role.eventbridge_service_role.arn
104 | dead_letter_config {
105 | arn = aws_sqs_queue.dead_letter_queue.arn
106 | }
107 | input_transformer {
108 | input_paths = {
109 | S3ObjectKey = "$.detail.object.key"
110 | }
111 | input_template = <<END
112 | {
113 | "S3ObjectKey": ["<S3ObjectKey>"]
114 | }
115 | END
116 | }
117 | }
118 | # # ---------------------------------------------------------------------------------------------------------------------#
119 | # EventBridge Rule for EC2 instance termination lifecycle
120 | # # ---------------------------------------------------------------------------------------------------------------------#
121 | resource "aws_cloudwatch_event_rule" "ec2_terminating" {
122 | name = "${local.project}-ec2-terminating-rule"
123 | description = "Trigger on EC2 instance terminating"
124 | event_pattern = jsonencode({
125 | "source" : ["aws.autoscaling"],
126 | "detail-type" : ["EC2 Instance-terminate Lifecycle Action"],
127 | "detail" : {
128 | "LifecycleTransition" : ["autoscaling:EC2_INSTANCE_TERMINATING"],
129 | "Origin": [ "AutoScalingGroup" ],
130 | "Destination": [ "EC2" ]
131 | }
132 | })
133 | }
134 | # # ---------------------------------------------------------------------------------------------------------------------#
135 | # EventBridge Rule Target for SSM Document CloudMap Deregister
136 | # # ---------------------------------------------------------------------------------------------------------------------#
137 | resource "aws_cloudwatch_event_target" "ec2_terminating" {
138 | depends_on = [aws_autoscaling_group.this]
139 | rule = aws_cloudwatch_event_rule.ec2_terminating.name
140 | target_id = "${local.project}-cloudmap-deregister"
141 | arn = aws_ssm_document.cloudmap_deregister.arn
142 | role_arn = aws_iam_role.eventbridge_service_role.arn
143 | dead_letter_config {
144 | arn = aws_sqs_queue.dead_letter_queue.arn
145 | }
146 | input_transformer {
147 | input_paths = {
148 | InstanceId = "$.detail.EC2InstanceId"
149 | AutoScalingGroupName = "$.detail.AutoScalingGroupName"
150 | }
151 | input_template = <<END
152 | {
153 | "InstanceId": ["<InstanceId>"],
154 | "AutoScalingGroupName": ["<AutoScalingGroupName>"]
155 | }
156 | END
157 | }
158 | }
159 |
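All three rules above route failed target invocations to the shared dead letter queue, but nothing alerts when messages land there. A minimal sketch of a queue-depth alarm reusing this stack's SNS topic (the resource name and thresholds are assumptions, not part of this file):

# Sketch: alert via the existing SNS topic when any EventBridge delivery fails.
resource "aws_cloudwatch_metric_alarm" "eventbridge_dlq" {
  alarm_name          = "${local.project} eventbridge dlq not empty"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = 1
  metric_name         = "ApproximateNumberOfMessagesVisible"
  namespace           = "AWS/SQS"
  period              = 300
  statistic           = "Maximum"
  threshold           = 0
  alarm_description   = "Failed EventBridge deliveries are waiting in the dead letter queue"
  alarm_actions       = [aws_sns_topic.default.arn]
  dimensions = {
    QueueName = aws_sqs_queue.dead_letter_queue.name
  }
}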
--------------------------------------------------------------------------------
/lambda.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////////[ LAMBDA IMAGE OPTIMIZATION ]//////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create Lambda IAM role and attach policy permissions
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_cloudwatch_log_group" "lambda" {
10 | name = "/lambda/${aws_lambda_function.image_optimization.function_name}/"
11 | retention_in_days = 7
12 | }
13 |
14 | data "aws_iam_policy_document" "lambda" {
15 | statement {
16 | sid = "LambdaLog"
17 | effect = "Allow"
18 | actions = [
19 | "logs:CreateLogGroup",
20 | "logs:CreateLogStream",
21 | "logs:PutLogEvents",
22 | ]
23 | resources = ["*"]
24 | }
25 |
26 | statement {
27 | sid = "LambdaAccess"
28 | effect = "Allow"
29 | actions = [
30 | "ec2:CreateNetworkInterface",
31 | "ec2:DescribeNetworkInterfaces",
32 | "ec2:DescribeSubnets",
33 | "ec2:DeleteNetworkInterface",
34 | "ec2:AssignPrivateIpAddresses",
35 | "ec2:UnassignPrivateIpAddresses"
36 | ]
37 | resources = ["*"]
38 | }
39 | }
40 |
41 | resource "aws_iam_policy" "lambda" {
42 | name = "${local.project}-lambda"
43 | path = "/"
44 | description = "IAM policy for lambda"
45 | policy = data.aws_iam_policy_document.lambda.json
46 | }
47 |
48 | resource "aws_iam_role_policy_attachment" "lambda" {
49 | role = aws_iam_role.lambda.name
50 | policy_arn = aws_iam_policy.lambda.arn
51 | }
52 |
53 | data "aws_iam_policy_document" "assume_role" {
54 | statement {
55 | effect = "Allow"
56 | principals {
57 | type = "Service"
58 | identifiers = ["lambda.amazonaws.com"]
59 | }
60 | actions = ["sts:AssumeRole"]
61 | }
62 | }
63 |
64 | resource "aws_iam_role" "lambda" {
65 | name = "${local.project}-lambda"
66 | assume_role_policy = data.aws_iam_policy_document.assume_role.json
67 | }
68 | # # ---------------------------------------------------------------------------------------------------------------------#
69 | # Create Lambda permissions for CloudFront
70 | # # ---------------------------------------------------------------------------------------------------------------------#
71 | resource "aws_lambda_permission" "this" {
72 | provider = aws.useast1
73 | statement_id = "AllowCloudFrontServicePrincipal"
74 | action = "lambda:InvokeFunctionUrl"
75 | function_name = aws_lambda_function.image_optimization.function_name
76 | principal = "cloudfront.amazonaws.com"
77 | source_arn = aws_cloudfront_distribution.this.arn
78 | qualifier = aws_lambda_alias.image_optimization.name
79 | }
80 | # # ---------------------------------------------------------------------------------------------------------------------#
81 | # Create Lambda function npm package
82 | # # ---------------------------------------------------------------------------------------------------------------------#
83 | resource "null_resource" "npm_install" {
84 | provisioner "local-exec" {
85 | command = "cd ${abspath(path.root)}/lambda/image_optimization && npm install"
86 | }
87 | triggers = {
88 | always_run = filesha256("${abspath(path.root)}/lambda/image_optimization/index.mjs")
89 | }
90 | }
91 | # # ---------------------------------------------------------------------------------------------------------------------#
92 | # Create Lambda function zip archive
93 | # # ---------------------------------------------------------------------------------------------------------------------#
94 | data "archive_file" "lambda_image_optimization" {
95 | depends_on = [null_resource.npm_install]
96 | type = "zip"
97 | source_dir = "${abspath(path.root)}/lambda/image_optimization"
98 | output_file_mode = "0666"
99 | output_path = "${abspath(path.root)}/lambda/image_optimization.zip"
100 | }
101 | # # ---------------------------------------------------------------------------------------------------------------------#
102 | # Lambda function with variables
103 | # # ---------------------------------------------------------------------------------------------------------------------#
104 | resource "aws_lambda_function" "image_optimization" {
105 | provider = aws.useast1
106 | function_name = "${local.project}-image-optimization"
107 | role = aws_iam_role.lambda.arn
108 | filename = data.archive_file.lambda_image_optimization.output_path
109 | source_code_hash = data.archive_file.lambda_image_optimization.output_base64sha256
110 | runtime = "nodejs20.x"
111 | handler = "index.handler"
112 | memory_size = 256
113 | timeout = 30
114 | publish = true
115 | environment {
116 | variables = {
117 | s3BucketRegion = aws_s3_bucket.this["media"].region
118 | originalImageBucketName = aws_s3_bucket.this["media"].id
119 | transformedImageBucketName = aws_s3_bucket.this["media-optimized"].id
120 | transformedImageCacheTTL = "max-age=31622400"
121 | maxImageSize = "4700000"
122 | }
123 | }
124 | }
125 | # # ---------------------------------------------------------------------------------------------------------------------#
126 | # Lambda function url
127 | # # ---------------------------------------------------------------------------------------------------------------------#
128 | resource "aws_lambda_function_url" "image_optimization" {
129 | provider = aws.useast1
130 | function_name = aws_lambda_function.image_optimization.function_name
131 | qualifier = aws_lambda_alias.image_optimization.name
132 | authorization_type = "AWS_IAM"
133 | }
134 | # # ---------------------------------------------------------------------------------------------------------------------#
135 | # Lambda function alias
136 | # # ---------------------------------------------------------------------------------------------------------------------#
137 | resource "aws_lambda_alias" "image_optimization" {
138 | provider = aws.useast1
139 | name = "${local.project}-image-optimization"
140 | description = "Lambda image optimization alias for ${local.project}"
141 | function_name = aws_lambda_function.image_optimization.arn
142 | function_version = "$LATEST"
143 | }
144 |
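The alias-qualified function URL above is what the CloudFront origin in cloudfront.tf calls. A minimal sketch surfacing it for wiring and debugging (the output name is an assumption):

# Sketch: expose the image optimization endpoint after apply.
output "image_optimization_function_url" {
  description = "Alias-qualified Lambda function URL serving optimized images"
  value       = aws_lambda_function_url.image_optimization.function_url
}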
--------------------------------------------------------------------------------
/lambda/image_optimization/index.mjs:
--------------------------------------------------------------------------------
1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: MIT-0
3 |
4 | import { GetObjectCommand, PutObjectCommand, S3Client } from "@aws-sdk/client-s3";
5 | import Sharp from 'sharp';
6 |
7 | const s3Client = new S3Client({ region: process.env.s3BucketRegion });
8 | const S3_ORIGINAL_IMAGE_BUCKET = process.env.originalImageBucketName;
9 | const S3_TRANSFORMED_IMAGE_BUCKET = process.env.transformedImageBucketName;
10 | const TRANSFORMED_IMAGE_CACHE_TTL = process.env.transformedImageCacheTTL;
11 | const MAX_IMAGE_SIZE = parseInt(process.env.maxImageSize);
12 |
13 | export const handler = async (event) => {
14 | // Validate if this is a GET request
15 | if (!event.requestContext || !event.requestContext.http || !(event.requestContext.http.method === 'GET')) return sendError(400, 'Only GET method is supported', event);
16 |
17 | // Extracting the path and query parameters
18 | const path = event.requestContext.http.path;
19 | const queryStringParameters = event.queryStringParameters || {};
20 |
21 | // The image path from the URL
22 | const imagePath = path.startsWith('/') ? path.substring(1) : path;
23 |
24 | // Check if the requested file should be processed or ignored
25 | const fileExtension = path.split('.').pop().toLowerCase();
26 | const supportedExtensions = ['jpg', 'jpeg', 'png', 'webp', 'gif', 'avif', 'svg'];
27 |
28 | if (!supportedExtensions.includes(fileExtension)) {
29 | // If the file is not an image, return a 302 redirect to the original URL
30 | return {
31 | statusCode: 302,
32 | headers: {
33 | 'Location': path,
34 | 'Cache-Control': 'no-cache'
35 | }
36 | };
37 | }
38 |
39 | // Extracting operations from query parameters
40 | const width = queryStringParameters.width ? parseInt(queryStringParameters.width) : null;
41 | const height = queryStringParameters.height ? parseInt(queryStringParameters.height) : null;
42 | const format = queryStringParameters['image-type'] || null;
43 | const quality = queryStringParameters.quality ? parseInt(queryStringParameters.quality) : null;
44 |
45 | var startTime = performance.now();
46 | let originalImageBody;
47 | let contentType;
48 |
49 | try {
50 | const getOriginalImageCommand = new GetObjectCommand({ Bucket: S3_ORIGINAL_IMAGE_BUCKET, Key: imagePath });
51 | const getOriginalImageCommandOutput = await s3Client.send(getOriginalImageCommand);
52 | console.log(`Got response from S3 for ${imagePath}`);
53 |
54 | originalImageBody = await getOriginalImageCommandOutput.Body.transformToByteArray();
55 | contentType = getOriginalImageCommandOutput.ContentType;
56 | } catch (error) {
57 | return sendError(500, 'Error downloading original image', error);
58 | }
59 |
60 | let transformedImage = Sharp(originalImageBody, { failOn: 'none', animated: true });
61 | const imageMetadata = await transformedImage.metadata();
62 |
63 | var timingLog = 'img-download;dur=' + parseInt(performance.now() - startTime);
64 | startTime = performance.now();
65 |
66 | try {
67 | if (width || height) {
68 | transformedImage = transformedImage.resize({
69 | width: width,
70 | height: height,
71 | fit: Sharp.fit.inside,
72 | withoutEnlargement: true
73 | });
74 | }
75 | if (imageMetadata.orientation) transformedImage = transformedImage.rotate();
76 |
77 | if (format) {
78 | var isLossy = false;
79 | switch (format) {
80 | case 'jpeg': contentType = 'image/jpeg'; isLossy = true; break;
81 | case 'gif': contentType = 'image/gif'; break;
82 | case 'webp': contentType = 'image/webp'; isLossy = true; break;
83 | case 'png': contentType = 'image/png'; break;
84 | case 'avif': contentType = 'image/avif'; isLossy = true; break;
85 | default: contentType = 'image/jpeg'; isLossy = true;
86 | }
87 | if (quality && isLossy) {
88 | transformedImage = transformedImage.toFormat(format, { quality: quality });
89 | } else {
90 | transformedImage = transformedImage.toFormat(format);
91 | }
92 | } else {
93 | if (contentType === 'image/svg+xml') contentType = 'image/png';
94 | }
95 |
96 | transformedImage = await transformedImage.toBuffer();
97 | } catch (error) {
98 | return sendError(500, 'Error transforming image', error);
99 | }
100 |
101 | timingLog = timingLog + ',img-transform;dur=' + parseInt(performance.now() - startTime);
102 | const imageTooBig = Buffer.byteLength(transformedImage) > MAX_IMAGE_SIZE;
103 |
104 | if (S3_TRANSFORMED_IMAGE_BUCKET) {
105 | startTime = performance.now();
106 | try {
107 | const metadata = {
108 | 'cache-control': TRANSFORMED_IMAGE_CACHE_TTL,
109 | 'width': width ? width.toString() : 'empty',
110 | 'height': height ? height.toString() : 'empty',
111 | 'quality': quality ? quality.toString() : 'empty'
112 | };
113 | const putImageCommand = new PutObjectCommand({
114 | Body: transformedImage,
115 | Bucket: S3_TRANSFORMED_IMAGE_BUCKET,
116 | Key: imagePath,
117 | ContentType: contentType,
118 | Metadata: metadata
119 | });
120 | await s3Client.send(putImageCommand);
121 | timingLog = timingLog + ',img-upload;dur=' + parseInt(performance.now() - startTime);
122 |
123 | if (imageTooBig) {
124 | return {
125 | statusCode: 302,
126 | headers: {
127 | 'Location': '/' + imagePath + '?' + new URLSearchParams(queryStringParameters).toString(),
128 | 'Cache-Control': 'private,no-store',
129 | 'Server-Timing': timingLog
130 | }
131 | };
132 | }
133 | } catch (error) {
134 | logError('Could not upload transformed image to S3', error);
135 | }
136 | }
137 |
138 | if (imageTooBig) {
139 | return sendError(403, 'Requested transformed image is too big', '');
140 | } else {
141 | return {
142 | statusCode: 200,
143 | body: transformedImage.toString('base64'),
144 | isBase64Encoded: true,
145 | headers: {
146 | 'Content-Type': contentType,
147 | 'Cache-Control': TRANSFORMED_IMAGE_CACHE_TTL,
148 | 'Server-Timing': timingLog
149 | }
150 | };
151 | }
152 | };
153 |
154 | function sendError(statusCode, body, error) {
155 | logError(body, error);
156 | return { statusCode, body };
157 | }
158 |
159 | function logError(body, error) {
160 | console.log('APPLICATION ERROR', body);
161 | console.log(error);
162 | }
163 |
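The handler varies its output on exactly four query parameters: width, height, quality and image-type. Whatever CDN sits in front must include those, and only those, in the cache key; a minimal Terraform sketch of a matching cache policy (resource name and TTLs are assumptions; cloudfront.tf may already configure this differently):

# Sketch: cache key limited to the query parameters the handler reads.
resource "aws_cloudfront_cache_policy" "image_optimization" {
  name        = "${local.project}-image-optimization"
  min_ttl     = 0
  default_ttl = 86400
  max_ttl     = 31536000
  parameters_in_cache_key_and_forwarded_to_origin {
    enable_accept_encoding_gzip = true
    cookies_config {
      cookie_behavior = "none"
    }
    headers_config {
      header_behavior = "none"
    }
    query_strings_config {
      query_string_behavior = "whitelist"
      query_strings {
        items = ["width", "height", "quality", "image-type"]
      }
    }
  }
}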
--------------------------------------------------------------------------------
/lambda/image_optimization/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "image-optimization",
3 | "version": "0.1.0",
4 | "dependencies": {
5 | "sharp": "^0.33.5",
6 | "@img/sharp-libvips-linux-x64": "^1.0.4",
7 | "@img/sharp-linux-x64": "^0.33.5",
8 | "@aws-sdk/client-s3": "^3.637.0"
9 | }
10 | }
11 |
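These dependencies pin sharp's prebuilt linux-x64 binaries, so the function only works on the default x86_64 Lambda architecture. A sketch of making that constraint explicit on the function resource (assumed addition; lambda.tf relies on the default today):

# Sketch: pin the Lambda architecture to match @img/sharp-linux-x64.
resource "aws_lambda_function" "image_optimization" {
  # ... existing arguments from lambda.tf ...
  architectures = ["x86_64"]
}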
--------------------------------------------------------------------------------
/opensearch.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 | //////////////////////////////////////////////////////////[ OPENSEARCH ]///////////////////////////////////////////////
4 |
5 | # # ---------------------------------------------------------------------------------------------------------------------#
6 | # Create OpenSearch service linked role if it does not exist
7 | # # ---------------------------------------------------------------------------------------------------------------------#
8 | resource "null_resource" "es" {
9 | provisioner "local-exec" {
10 | interpreter = ["/bin/bash", "-c"]
11 | command = <<EOF
12 | exit_code=$(aws iam get-role --role-name AWSServiceRoleForAmazonOpenSearchService > /dev/null 2>&1 ; echo $?)
13 | if [[ $exit_code -ne 0 ]]; then
14 | aws iam create-service-linked-role --aws-service-name opensearchservice.amazonaws.com
15 | fi
16 | EOF
17 | }
18 | }
19 | # # ---------------------------------------------------------------------------------------------------------------------#
20 | # Create OpenSearch domain access policy
21 | # # ---------------------------------------------------------------------------------------------------------------------#
22 | data "aws_iam_policy_document" "opensearch_access" {
23 | version = "2012-10-17"
24 | statement {
25 | effect = "Allow"
26 | principals {
27 | type = "AWS"
28 | identifiers = ["*"]
29 | }
30 | actions = ["es:*"]
31 | resources = ["arn:aws:es:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:domain/${local.project}-opensearch/*"]
32 | }
33 | }
34 | # # ---------------------------------------------------------------------------------------------------------------------#
35 | # Create OpenSearch domain
36 | # # ---------------------------------------------------------------------------------------------------------------------#
37 | resource "aws_opensearch_domain" "this" {
38 | depends_on = [null_resource.es]
39 | domain_name = "${local.project}-opensearch"
40 | engine_version = var.opensearch["engine_version"]
41 | cluster_config {
42 | instance_type = var.opensearch["instance_type"]
43 | instance_count = var.opensearch["instance_count"]
44 | zone_awareness_enabled = var.opensearch["instance_count"] > 1
45 | dynamic "zone_awareness_config" {
46 | for_each = var.opensearch["instance_count"] > 1 ? [var.opensearch["instance_count"]] : []
47 | content {
48 | availability_zone_count = var.opensearch["instance_count"]
49 | }
50 | }
51 | }
52 | advanced_security_options {
53 | enabled = true
54 | anonymous_auth_enabled = false
55 | internal_user_database_enabled = true
56 | master_user_options {
57 | master_user_name = random_string.this["opensearch"].result
58 | master_user_password = random_password.this["opensearch"].result
59 | }
60 | }
61 | encrypt_at_rest {
62 | enabled = true
63 | }
64 | domain_endpoint_options {
65 | enforce_https = true
66 | tls_security_policy = "Policy-Min-TLS-1-2-2019-07"
67 | }
68 |
69 | node_to_node_encryption {
70 | enabled = true
71 | }
72 |
73 | ebs_options {
74 | ebs_enabled = var.opensearch["ebs_enabled"]
75 | volume_type = var.opensearch["volume_type"]
76 | volume_size = var.opensearch["volume_size"]
77 | }
78 | vpc_options {
79 | subnet_ids = slice(values(aws_subnet.this).*.id, 0, var.opensearch["instance_count"])
80 | security_group_ids = [aws_security_group.opensearch.id]
81 | }
82 | tags = {
83 | Name = "${local.project}-opensearch"
84 | }
85 | log_publishing_options {
86 | cloudwatch_log_group_arn = aws_cloudwatch_log_group.opensearch.arn
87 | log_type = var.opensearch["log_type"]
88 | }
89 | access_policies = data.aws_iam_policy_document.opensearch_access.json
90 | }
91 | # # ---------------------------------------------------------------------------------------------------------------------#
92 | # Create CloudWatch log group for OpenSearch log stream
93 | # # ---------------------------------------------------------------------------------------------------------------------#
94 | resource "aws_cloudwatch_log_group" "opensearch" {
95 | name = "${local.project}-opensearch"
96 | }
97 |
98 | data "aws_iam_policy_document" "opensearch-log-publishing-policy" {
99 | statement {
100 | actions = [
101 | "logs:CreateLogStream",
102 | "logs:PutLogEvents",
103 | "logs:PutLogEventsBatch",
104 | ]
105 | resources = ["arn:aws:logs:*"]
106 | principals {
107 | identifiers = ["es.amazonaws.com"]
108 | type = "Service"
109 | }
110 | }
111 | }
112 |
113 | resource "aws_cloudwatch_log_resource_policy" "opensearch" {
114 | policy_name = "${local.project}-opensearch"
115 | policy_document = data.aws_iam_policy_document.opensearch-log-publishing-policy.json
116 | }
117 |
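The application needs the domain's VPC endpoint at configuration time; this stack most likely hands it over through SSM parameters elsewhere, but a plain output is a minimal sketch for inspection (the output name is an assumption):

# Sketch: surface the OpenSearch VPC endpoint after apply.
output "opensearch_endpoint" {
  value = aws_opensearch_domain.this.endpoint
}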
--------------------------------------------------------------------------------
/production.auto.tfvars.template:
--------------------------------------------------------------------------------
1 | # Rename this file to production.auto.tfvars and customize your variables #
2 | brand = "magenx"
3 | github_repo = "magenx/Magento-2"
4 | domain = "magenx.org"
5 | admin_email = "admin@magenx.org"
6 |
--------------------------------------------------------------------------------
/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | aws = {
4 | source = "hashicorp/aws"
5 | version = "5.88.0"
6 | }
7 | null = {
8 | source = "hashicorp/null"
9 | version = "3.2.3"
10 | }
11 | random = {
12 | source = "hashicorp/random"
13 | version = "3.6.3"
14 | }
15 | archive = {
16 | source = "hashicorp/archive"
17 | version = "2.7.0"
18 | }
19 | }
20 | }
21 |
22 | provider "aws" {
23 | default_tags {
24 | tags = local.default_tags
25 | }
26 | }
27 | provider "aws" {
28 | alias = "useast1"
29 | region = "us-east-1"
30 | default_tags {
31 | tags = local.default_tags
32 | }
33 | }
34 | provider "null" {}
35 | provider "random" {}
36 | provider "archive" {}
37 |
--------------------------------------------------------------------------------
/rabbitmq.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////////[ AMAZON MQ BROKER ]/////////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create RabbitMQ - queue message broker
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_mq_broker" "this" {
10 | broker_name = "${local.project}-rabbitmq"
11 | engine_type = "RabbitMQ"
12 | engine_version = var.rabbitmq["engine_version"]
13 | host_instance_type = var.rabbitmq["host_instance_type"]
14 | security_groups = [aws_security_group.rabbitmq.id]
15 | subnet_ids = [values(aws_subnet.this).0.id]
16 | user {
17 | username = var.brand
18 | password = random_password.this["rabbitmq"].result
19 | }
20 | tags = {
21 | Name = "${local.project}-rabbitmq"
22 | }
23 | }
24 |
25 |
26 |
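The broker's AMQPS endpoints are only available as computed attributes; a minimal sketch for surfacing the first one (the output name is an assumption):

# Sketch: first AMQPS endpoint of the single-instance broker.
output "rabbitmq_endpoint" {
  value = aws_mq_broker.this.instances[0].endpoints[0]
}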
--------------------------------------------------------------------------------
/random.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////////[ RANDOM STRING GENERATOR ]////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Generate random uuid string that is intended to be used as secret header
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "random_uuid" "this" {}
10 | # # ---------------------------------------------------------------------------------------------------------------------#
11 | # Generate random passwords
12 | # # ---------------------------------------------------------------------------------------------------------------------#
13 | resource "random_password" "this" {
14 | for_each = toset(var.password)
15 | length = 16
16 | lower = true
17 | upper = true
18 | numeric = true
19 | special = true
20 | override_special = "!$"
21 | }
22 | # # ---------------------------------------------------------------------------------------------------------------------#
23 | # Generate random string
24 | # # ---------------------------------------------------------------------------------------------------------------------#
25 | resource "random_string" "this" {
26 | for_each = toset(var.string)
27 | length = 7
28 | lower = true
29 | numeric = true
30 | special = false
31 | upper = false
32 | }
33 | # # ---------------------------------------------------------------------------------------------------------------------#
34 | # Generate random string for s3
35 | # # ---------------------------------------------------------------------------------------------------------------------#
36 | resource "random_string" "s3" {
37 | for_each = var.s3
38 | length = 7
39 | lower = true
40 | numeric = true
41 | special = false
42 | upper = false
43 | }
44 | # # ---------------------------------------------------------------------------------------------------------------------#
45 | # Select random subnets for the ASG to match the required availability_zones_qty
46 | # # ---------------------------------------------------------------------------------------------------------------------#
47 | resource "random_shuffle" "subnets" {
48 | input = [for subnet in aws_subnet.this : subnet.id]
49 | result_count = var.vpc["availability_zones_qty"]
50 | }
51 |
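Both for_each loops above iterate over plain name lists declared in variables.tf. A minimal sketch of the expected shape (the element values are illustrative assumptions, not the stack's actual lists):

# Sketch: the collections these for_each loops consume (example values only).
variable "password" {
  type    = list(string)
  default = ["database", "rabbitmq", "opensearch"]
}

variable "string" {
  type    = list(string)
  default = ["opensearch"]
}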
--------------------------------------------------------------------------------
/rds.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | //////////////////////////////////////////////////////////////[ RDS ]/////////////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create RDS subnet group in our dedicated VPC
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_db_subnet_group" "this" {
10 | name = "${local.project}-db-subnet"
11 | description = "RDS Subnet for ${replace(local.project,"-"," ")}"
12 | subnet_ids = random_shuffle.subnets.result
13 | tags = {
14 | Name = "${local.project}-db-subnet"
15 | }
16 | }
17 | # # ---------------------------------------------------------------------------------------------------------------------#
18 | # Create RDS parameter groups
19 | # # ---------------------------------------------------------------------------------------------------------------------#
20 | resource "aws_db_parameter_group" "this" {
21 | name = "${local.project}-parameters"
22 | family = var.rds["family"]
23 | description = "Parameter group for ${local.project} database"
24 | dynamic "parameter" {
25 | for_each = var.rds_parameters
26 | content {
27 | name = parameter.value["name"]
28 | value = parameter.value["value"]
29 | }
30 | }
31 | tags = {
32 | Name = "${local.project}-parameters"
33 | }
34 | }
35 | # # ---------------------------------------------------------------------------------------------------------------------#
36 | # Create RDS instance
37 | # # ---------------------------------------------------------------------------------------------------------------------#
38 | resource "aws_db_instance" "this" {
39 | identifier = "${local.project}-rds"
40 | allocated_storage = var.rds["allocated_storage"]
41 | max_allocated_storage = var.rds["max_allocated_storage"]
42 | storage_type = var.rds["storage_type"]
43 | storage_encrypted = var.rds["storage_encrypted"]
44 | engine = var.rds["engine"]
45 | engine_version = var.rds["engine_version"]
46 | instance_class = var.rds["instance_class"]
47 | multi_az = var.rds["multi_az"]
48 | db_name = local.db_name
49 | username = var.brand
50 | password = random_password.this["database"].result
51 | parameter_group_name = aws_db_parameter_group.this.id
52 | skip_final_snapshot = var.rds["skip_final_snapshot"]
53 | vpc_security_group_ids = [aws_security_group.rds.id]
54 | db_subnet_group_name = aws_db_subnet_group.this.name
55 | enabled_cloudwatch_logs_exports = [var.rds["enabled_cloudwatch_logs_exports"]]
56 | performance_insights_enabled = var.rds["performance_insights_enabled"]
57 | copy_tags_to_snapshot = var.rds["copy_tags_to_snapshot"]
58 | backup_retention_period = var.rds["backup_retention_period"]
59 | delete_automated_backups = var.rds["delete_automated_backups"]
60 | deletion_protection = var.rds["deletion_protection"]
61 | tags = {
62 | Name = "${local.project}"
63 | }
64 | }
65 | # # ---------------------------------------------------------------------------------------------------------------------#
66 | # Create RDS instance event subscription
67 | # # ---------------------------------------------------------------------------------------------------------------------#
68 | resource "aws_db_event_subscription" "db_event_subscription" {
69 | name = "${local.project}-rds-event-subscription"
70 | sns_topic = aws_sns_topic.default.arn
71 | source_type = "db-instance"
72 | source_ids = [aws_db_instance.this.identifier]
73 | event_categories = [
74 | "availability",
75 | "deletion",
76 | "failover",
77 | "failure",
78 | "low storage",
79 | "maintenance",
80 | "notification",
81 | "read replica",
82 | "recovery",
83 | "restoration",
84 | "configuration change"
85 | ]
86 | }
87 | # # ---------------------------------------------------------------------------------------------------------------------#
88 | # Create CloudWatch CPU Utilization metrics and email alerts
89 | # # ---------------------------------------------------------------------------------------------------------------------#
90 | resource "aws_cloudwatch_metric_alarm" "rds_cpu" {
91 | alarm_name = "${local.project} rds cpu utilization too high"
92 | comparison_operator = "GreaterThanThreshold"
93 | evaluation_periods = "1"
94 | metric_name = "CPUUtilization"
95 | namespace = "AWS/RDS"
96 | period = "600"
97 | statistic = "Average"
98 | threshold = "80"
99 | alarm_description = "Average database CPU utilization over last 10 minutes too high"
100 | alarm_actions = [aws_sns_topic.default.arn]
101 | ok_actions = [aws_sns_topic.default.arn]
102 |
103 | dimensions = {
104 | DBInstanceIdentifier = aws_db_instance.this.id
105 | }
106 | }
107 | # # ---------------------------------------------------------------------------------------------------------------------#
108 | # Create CloudWatch Freeable Memory metrics and email alerts
109 | # # ---------------------------------------------------------------------------------------------------------------------#
110 | resource "aws_cloudwatch_metric_alarm" "rds_memory" {
111 | alarm_name = "${local.project} rds freeable memory too low"
112 | comparison_operator = "LessThanThreshold"
113 | evaluation_periods = "1"
114 | metric_name = "FreeableMemory"
115 | namespace = "AWS/RDS"
116 | period = "600"
117 | statistic = "Average"
118 | threshold = "1.0e+09"
119 | alarm_description = "Average database freeable memory over last 10 minutes too low, performance may suffer"
120 | alarm_actions = [aws_sns_topic.default.arn]
121 | ok_actions = [aws_sns_topic.default.arn]
122 |
123 | dimensions = {
124 | DBInstanceIdentifier = aws_db_instance.this.id
125 | }
126 | }
127 | # # ---------------------------------------------------------------------------------------------------------------------#
128 | # Create CloudWatch Connections Anomaly metrics and email alerts
129 | # # ---------------------------------------------------------------------------------------------------------------------#
130 | resource "aws_cloudwatch_metric_alarm" "rds_connections_anomaly" {
131 | alarm_name = "${local.project} rds connections anomaly"
132 | comparison_operator = "GreaterThanUpperThreshold"
133 | evaluation_periods = "5"
134 | threshold_metric_id = "e1"
135 | alarm_description = "Database connection count anomaly detected"
136 | alarm_actions = [aws_sns_topic.default.arn]
137 | ok_actions = [aws_sns_topic.default.arn]
138 |
139 | insufficient_data_actions = []
140 |
141 | metric_query {
142 | id = "e1"
143 | expression = "ANOMALY_DETECTION_BAND(m1, 2)"
144 | label = "DatabaseConnections (Expected)"
145 | return_data = "true"
146 | }
147 |
148 | metric_query {
149 | id = "m1"
150 | return_data = "true"
151 | metric {
152 | metric_name = "DatabaseConnections"
153 | namespace = "AWS/RDS"
154 | period = "600"
155 | stat = "Average"
156 | unit = "Count"
157 |
158 | dimensions = {
159 | DBInstanceIdentifier = aws_db_instance.this.id
160 | }
161 | }
162 | }
163 | }
164 | # # ---------------------------------------------------------------------------------------------------------------------#
165 | # Create CloudWatch Max Connections metrics and email alerts
166 | # # ---------------------------------------------------------------------------------------------------------------------#
167 | resource "aws_cloudwatch_metric_alarm" "rds_max_connections" {
168 | alarm_name = "${local.project} rds connections over last 10 minutes is too high"
169 | comparison_operator = "GreaterThanThreshold"
170 | evaluation_periods = "1"
171 | metric_name = "DatabaseConnections"
172 | namespace = "AWS/RDS"
173 | period = "600"
174 | statistic = "Average"
175 | threshold = ceil((80 / 100) * var.max_connection_count[var.rds["instance_class"]])
176 | alarm_description = "Average connections over last 10 minutes is too high"
177 | alarm_actions = [aws_sns_topic.default.arn]
178 | ok_actions = [aws_sns_topic.default.arn]
179 |
180 | dimensions = {
181 | DBInstanceIdentifier = aws_db_instance.this.id
182 | }
183 | }
184 |
185 |
186 |
187 |
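The max-connections alarm derives its threshold as 80% of a per-instance-class lookup instead of a hard-coded number: for a class mapped to 683 connections, ceil(0.8 * 683) = 547. A minimal sketch of the map it expects (class names and counts are illustrative assumptions; the real values live in variables.tf):

# Sketch: lookup consumed by the rds_max_connections threshold (example values).
variable "max_connection_count" {
  type = map(number)
  default = {
    "db.t4g.medium" = 341 # assumed, ~ DBInstanceClassMemory / 12582880
    "db.m6g.large"  = 683 # assumed
  }
}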
--------------------------------------------------------------------------------
/route53.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////////[ ROUTE53 MAIN ZONE RECORD ]/////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create Route53 zone with an alias A record domain -> cloudfront
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_route53_zone" "main" {
10 | name = var.domain
11 | }
12 |
13 | resource "aws_route53_record" "cname" {
14 | zone_id = aws_route53_zone.main.id
15 | name = var.domain
16 | type = "A"
17 |
18 | alias {
19 | name = aws_cloudfront_distribution.this.domain_name
20 | zone_id = aws_cloudfront_distribution.this.hosted_zone_id
21 | evaluate_target_health = true
22 | }
23 | }
24 |
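Only the apex name is covered here; if www should serve the same distribution, a parallel alias record is the usual companion. A minimal sketch (assumed addition; the CloudFront distribution would also need www listed as an alternate domain name):

# Sketch: optional www alias pointing at the same distribution.
resource "aws_route53_record" "www" {
  zone_id = aws_route53_zone.main.id
  name    = "www.${var.domain}"
  type    = "A"
  alias {
    name                   = aws_cloudfront_distribution.this.domain_name
    zone_id                = aws_cloudfront_distribution.this.hosted_zone_id
    evaluate_target_health = true
  }
}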
--------------------------------------------------------------------------------
/s3.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | //////////////////////////////////////////////////////////[ S3 BUCKET ]///////////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create S3 bucket
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_s3_bucket" "this" {
10 | for_each = var.s3
11 | bucket = "${local.project}-${each.key}"
12 | force_destroy = true
13 | tags = {
14 | Name = "${local.project}-${each.key}"
15 | }
16 | }
17 | # # ---------------------------------------------------------------------------------------------------------------------#
18 | # Create S3 bucket ownership configuration
19 | # # ---------------------------------------------------------------------------------------------------------------------#
20 | resource "aws_s3_bucket_ownership_controls" "this" {
21 | for_each = aws_s3_bucket.this
22 | bucket = each.value.id
23 | rule {
24 | object_ownership = "BucketOwnerPreferred"
25 | }
26 | }
27 | # # ---------------------------------------------------------------------------------------------------------------------#
28 | # Create S3 bucket encryption
29 | # # ---------------------------------------------------------------------------------------------------------------------#
30 | resource "aws_s3_bucket_server_side_encryption_configuration" "this" {
31 | for_each = aws_s3_bucket.this
32 | bucket = aws_s3_bucket.this[each.key].id
33 | rule {
34 | apply_server_side_encryption_by_default {
35 | sse_algorithm = "AES256"
36 | }
37 | }
38 | }
39 | # # ---------------------------------------------------------------------------------------------------------------------#
40 | # Block public access acl for internal S3 buckets
41 | # # ---------------------------------------------------------------------------------------------------------------------#
42 | resource "aws_s3_bucket_public_access_block" "this" {
43 | for_each = aws_s3_bucket.this
44 | bucket = aws_s3_bucket.this[each.key].id
45 | block_public_acls = true
46 | block_public_policy = true
47 | ignore_public_acls = true
48 | restrict_public_buckets = true
49 | }
50 | # # ---------------------------------------------------------------------------------------------------------------------#
51 | # Cleanup lifecycle rule for the media-optimized bucket
52 | # # ---------------------------------------------------------------------------------------------------------------------#
53 | resource "aws_s3_bucket_lifecycle_configuration" "this" {
54 | bucket = aws_s3_bucket.this["media-optimized"].id
55 | rule {
56 | id = "${local.project}-cleanup-images"
57 | status = "Enabled"
58 | expiration {
59 | days = 365
60 | }
61 | }
62 | }
63 | # # ---------------------------------------------------------------------------------------------------------------------#
64 | # Create policy to limit S3 media bucket access
65 | # # ---------------------------------------------------------------------------------------------------------------------#
66 | data "aws_iam_policy_document" "media" {
67 | statement {
68 | sid = "AllowCloudFrontAccess"
69 | effect = "Allow"
70 | actions = ["s3:GetObject"]
71 | resources = ["${aws_s3_bucket.this["media"].arn}/*"]
72 | principals {
73 | type = "AWS"
74 | identifiers = [aws_cloudfront_origin_access_identity.this.iam_arn]
75 | }
76 | }
77 |
78 | statement {
79 | sid = "AllowLambdaGet"
80 | effect = "Allow"
81 | actions = ["s3:GetObject"]
82 | resources = ["${aws_s3_bucket.this["media"].arn}/*"]
83 | principals {
84 | type = "AWS"
85 | identifiers = [aws_iam_role.lambda.arn]
86 | }
87 | }
88 |
89 | statement {
90 | sid = "AllowEC2PutObject"
91 | effect = "Allow"
92 | actions = ["s3:PutObject"]
93 | resources = ["${aws_s3_bucket.this["media"].arn}/*"]
94 | principals {
95 | type = "AWS"
96 | identifiers = values(aws_iam_role.ec2)[*].arn
97 | }
98 | condition {
99 | test = "StringNotEquals"
100 | variable = "aws:SourceVpc"
101 | values = [aws_vpc.this.id]
102 | }
103 | }
104 |
105 | statement {
106 | sid = "AllowEC2GetObject"
107 | effect = "Allow"
108 | actions = ["s3:GetObject", "s3:GetObjectAcl"]
109 | resources = ["${aws_s3_bucket.this["media"].arn}/*"]
110 | principals {
111 | type = "AWS"
112 | identifiers = values(aws_iam_role.ec2)[*].arn
113 | }
114 | }
115 |
116 | statement {
117 | sid = "AllowEC2ListBucket"
118 | effect = "Allow"
119 | actions = ["s3:GetBucketLocation", "s3:ListBucket"]
120 | resources = ["${aws_s3_bucket.this["media"].arn}","${aws_s3_bucket.this["media"].arn}/*"]
121 | principals {
122 | type = "AWS"
123 | identifiers = values(aws_iam_role.ec2)[*].arn
124 | }
125 | }
126 | }
127 | # # ---------------------------------------------------------------------------------------------------------------------#
128 | # Create policy to limit S3 media optimized bucket access
129 | # # ---------------------------------------------------------------------------------------------------------------------#
130 | data "aws_iam_policy_document" "mediaoptimized" {
131 | statement {
132 | sid = "AllowLambdaGetPut"
133 | effect = "Allow"
134 | actions = ["s3:PutObject","s3:GetObject"]
135 | resources = ["${aws_s3_bucket.this["media-optimized"].arn}/*"]
136 | principals {
137 | type = "AWS"
138 | identifiers = [aws_iam_role.lambda.arn]
139 | }
140 | }
141 | }
142 |
143 | resource "aws_s3_bucket_policy" "mediaoptimized" {
144 | bucket = aws_s3_bucket.this["media-optimized"].id
145 | policy = data.aws_iam_policy_document.mediaoptimized.json
146 | }
147 |
148 | resource "aws_s3_bucket_policy" "media" {
149 | bucket = aws_s3_bucket.this["media"].id
150 | policy = data.aws_iam_policy_document.media.json
151 | }
152 | # # ---------------------------------------------------------------------------------------------------------------------#
153 | # Create S3 system bucket policy for SSM, ALB logs, CodeDeploy, CloudFront and Lambda access
154 | # # ---------------------------------------------------------------------------------------------------------------------#
155 | data "aws_iam_policy_document" "system" {
156 | statement {
157 | sid = "AllowSSMAgentS3Access"
158 | effect = "Allow"
159 | actions = [
160 | "s3:PutObject",
161 | "s3:GetObject"
162 | ]
163 | resources = [
164 | "${aws_s3_bucket.this["system"].arn}/*"
165 | ]
166 | principals {
167 | type = "Service"
168 | identifiers = ["ssm.amazonaws.com"]
169 | }
170 | }
171 |
172 | statement {
173 | sid = "ALBWriteLogs"
174 | effect = "Allow"
175 | actions = [
176 | "s3:PutObject"
177 | ]
178 | resources = ["${aws_s3_bucket.this["system"].arn}/ALB/AWSLogs/${data.aws_caller_identity.current.account_id}/*"]
179 | principals {
180 | type = "AWS"
181 | identifiers = [data.aws_elb_service_account.current.arn]
182 | }
183 | }
184 |
185 | statement {
186 | sid = "AllowCodeDeployS3Access"
187 | effect = "Allow"
188 | actions = [
189 | "s3:PutObject",
190 | "s3:GetObject"
191 | ]
192 | resources = [
193 | "${aws_s3_bucket.this["system"].arn}/*"
194 | ]
195 | principals {
196 | type = "AWS"
197 | identifiers = [
198 | aws_iam_role.codedeploy.arn
199 | ]
200 | }
201 | }
202 |
203 | statement {
204 | sid = "CloudFrontAccess"
205 | effect = "Allow"
206 | actions = ["s3:PutObject"]
207 | resources = ["${aws_s3_bucket.this["system"].arn}/CloudFront/*"]
208 | principals {
209 | type = "Service"
210 | identifiers = ["cloudfront.amazonaws.com"]
211 | }
212 | condition {
213 | test = "StringEquals"
214 | variable = "AWS:SourceAccount"
215 | values = [data.aws_caller_identity.current.account_id]
216 | }
217 | condition {
218 | test = "ArnLike"
219 | variable = "AWS:SourceArn"
220 | values = ["arn:aws:cloudfront::${data.aws_caller_identity.current.account_id}:distribution/*"]
221 | }
222 | }
223 |
224 | statement {
225 | sid = "AllowLambdaGet"
226 | effect = "Allow"
227 | actions = [
228 | "s3:GetObject"
229 | ]
230 | resources = ["${aws_s3_bucket.this["system"].arn}/lambda/*"]
231 | principals {
232 | type = "AWS"
233 | identifiers = [aws_iam_role.lambda.arn]
234 | }
235 | }
236 | }
237 |
238 | resource "aws_s3_bucket_policy" "system" {
239 | bucket = aws_s3_bucket.this["system"].id
240 | policy = data.aws_iam_policy_document.system.json
241 | }
242 | # # ---------------------------------------------------------------------------------------------------------------------#
243 | # Create S3 backup bucket policy for CodeDeploy access
244 | # # ---------------------------------------------------------------------------------------------------------------------#
245 | data "aws_iam_policy_document" "backup" {
246 | statement {
247 | actions = ["s3:PutObject"]
248 | effect = "Allow"
249 | resources = ["${aws_s3_bucket.this["backup"].arn}/*"]
250 | principals {
251 | type = "AWS"
252 | identifiers = [
253 | aws_iam_role.codedeploy.arn
254 | ]
255 | }
256 | }
257 | version = "2012-10-17"
258 | }
259 |
260 | resource "aws_s3_bucket_policy" "backup" {
261 | bucket = aws_s3_bucket.this["backup"].id
262 | policy = data.aws_iam_policy_document.backup.json
263 | }
264 |
265 |
266 |
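Every bucket here is created with force_destroy and without versioning, including backup. If backups should survive accidental overwrites or deletions, versioning is the smallest hardening step; a minimal sketch (assumed addition):

# Sketch: keep object history on the backup bucket.
resource "aws_s3_bucket_versioning" "backup" {
  bucket = aws_s3_bucket.this["backup"].id
  versioning_configuration {
    status = "Enabled"
  }
}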
--------------------------------------------------------------------------------
/security_groups.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ////////////////////////////////////////////////////////[ SECURITY GROUPS ]///////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create security group and rules for ALB
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_security_group" "alb" {
10 | name = "${local.project}-alb-sg"
11 | description = "Security group rules for ${local.project} ALB"
12 | vpc_id = aws_vpc.this.id
13 | tags = {
14 | Name = "${local.project}-alb-sg"
15 | }
16 | }
17 |
18 | resource "aws_vpc_security_group_ingress_rule" "alb" {
19 | description = "Security group rules for ALB ingress"
20 | security_group_id = aws_security_group.alb.id
21 | ip_protocol = "-1"
22 | cidr_ipv4 = "0.0.0.0/0"
23 | tags = {
24 | Name = "${local.project}-alb-ingress-sg"
25 | }
26 | }
27 |
28 | resource "aws_vpc_security_group_egress_rule" "alb" {
29 | description = "Security group rules for ALB egress"
30 | security_group_id = aws_security_group.alb.id
31 | ip_protocol = "-1"
32 | cidr_ipv4 = "0.0.0.0/0"
33 | tags = {
34 | Name = "${local.project}-alb-egress-sg"
35 | }
36 | }
37 |
38 |
39 | # # ---------------------------------------------------------------------------------------------------------------------#
40 | # Create security group and rules for EC2
41 | # # ---------------------------------------------------------------------------------------------------------------------#
42 | resource "aws_security_group" "ec2" {
43 | name = "${local.project}-ec2-sg"
44 | description = "Security group rules for ${local.project} EC2"
45 | vpc_id = aws_vpc.this.id
46 |
47 | tags = {
48 | Name = "${local.project}-ec2-sg"
49 | }
50 | }
51 |
52 | resource "aws_security_group_rule" "ec2_https_out" {
53 | type = "egress"
54 | description = "Allow outbound traffic on the instance https port"
55 | from_port = 443
56 | to_port = 443
57 | protocol = "tcp"
58 | cidr_blocks = ["0.0.0.0/0"]
59 | security_group_id = aws_security_group.ec2.id
60 | }
61 |
62 | resource "aws_security_group_rule" "ec2_http_out" {
63 | type = "egress"
64 | description = "Allow outbound traffic on the instance http port"
65 | from_port = 80
66 | to_port = 80
67 | protocol = "tcp"
68 | cidr_blocks = ["0.0.0.0/0"]
69 | security_group_id = aws_security_group.ec2.id
70 | }
71 |
72 | resource "aws_security_group_rule" "ec2_mysql_out" {
73 | type = "egress"
74 | description = "Allow outbound traffic on the instance MySql port"
75 | from_port = 3306
76 | to_port = 3306
77 | protocol = "tcp"
78 | source_security_group_id = aws_security_group.rds.id
79 | security_group_id = aws_security_group.ec2.id
80 | }
81 |
82 | resource "aws_security_group_rule" "ec2_rabbitmq_out" {
83 | type = "egress"
84 | description = "Allow outbound traffic on the instance RabbitMQ port"
85 | from_port = 5671
86 | to_port = 5671
87 | protocol = "tcp"
88 | source_security_group_id = aws_security_group.rabbitmq.id
89 | security_group_id = aws_security_group.ec2.id
90 | }
91 |
92 | resource "aws_security_group_rule" "ec2_redis_cache_out" {
93 | type = "egress"
94 | description = "Allow outbound traffic on the instance Redis port"
95 | from_port = 6379
96 | to_port = 6379
97 | protocol = "tcp"
98 | source_security_group_id = aws_security_group.redis.id
99 | security_group_id = aws_security_group.ec2.id
100 | }
101 |
102 | resource "aws_security_group_rule" "ec2_efs_out" {
103 | type = "egress"
104 | description = "Allow outbound traffic on the instance NFS port"
105 | from_port = 2049
106 | to_port = 2049
107 | protocol = "tcp"
108 | source_security_group_id = aws_security_group.efs.id
109 | security_group_id = aws_security_group.ec2.id
110 | }
111 |
112 | resource "aws_security_group_rule" "ec2_ses_out" {
113 | type = "egress"
114 | description = "Allow outbound traffic on the region SES port"
115 | from_port = 587
116 | to_port = 587
117 | protocol = "tcp"
118 | cidr_blocks = ["0.0.0.0/0"]
119 | security_group_id = aws_security_group.ec2.id
120 | }
121 |
122 | resource "aws_security_group_rule" "ec2_opensearch_out" {
123 | type = "egress"
124 | description = "Allow outbound traffic on the instance opensearch port"
125 | from_port = 9200
126 | to_port = 9200
127 | protocol = "tcp"
128 | source_security_group_id = aws_security_group.opensearch.id
129 | security_group_id = aws_security_group.ec2.id
130 | }
131 |
132 | resource "aws_security_group_rule" "ec2_http_in_ec2" {
133 | type = "ingress"
134 | description = "Allow all inbound traffic from ec2 on http port"
135 | from_port = 80
136 | to_port = 80
137 | protocol = "tcp"
138 | source_security_group_id = aws_security_group.ec2.id
139 | security_group_id = aws_security_group.ec2.id
140 | }
141 |
142 | resource "aws_security_group_rule" "ec2_http" {
143 | type = "ingress"
144 | description = "Allow all inbound traffic from the load balancer on http port"
145 | from_port = 80
146 | to_port = 80
147 | protocol = "tcp"
148 | source_security_group_id = aws_security_group.alb.id
149 | security_group_id = aws_security_group.ec2.id
150 | }
151 |
152 | # # ---------------------------------------------------------------------------------------------------------------------#
153 | # Create security group and rules for RDS
154 | # # ---------------------------------------------------------------------------------------------------------------------#
155 | resource "aws_security_group" "rds" {
156 | name = "${local.project}-rds-sg"
157 | description = "Security group rules for ${local.project} RDS"
158 | vpc_id = aws_vpc.this.id
159 |
160 | ingress {
161 | description = "Allow all inbound traffic to MySQL port from EC2"
162 | from_port = 3306
163 | to_port = 3306
164 | protocol = "tcp"
165 | security_groups = [aws_security_group.ec2.id]
166 | }
167 |
168 | tags = {
169 | Name = "${local.project}-rds-sg"
170 | }
171 | }
172 |
173 | # # ---------------------------------------------------------------------------------------------------------------------#
174 | # Create security group and rules for ElastiCache
175 | # # ---------------------------------------------------------------------------------------------------------------------#
176 | resource "aws_security_group" "redis" {
177 | name = "${local.project}-redis-sg"
178 | description = "Security group rules for ${local.project} ElastiCache"
179 | vpc_id = aws_vpc.this.id
180 |
181 | ingress {
182 | description = "Allow all inbound traffic to Redis port from EC2"
183 | from_port = 6379
184 | to_port = 6379
185 | protocol = "tcp"
186 | security_groups = [aws_security_group.ec2.id]
187 | }
188 |
189 | tags = {
190 | Name = "${local.project}-redis-sg"
191 | }
192 | }
193 |
194 | # # ---------------------------------------------------------------------------------------------------------------------#
195 | # Create security group and rules for RabbitMQ
196 | # # ---------------------------------------------------------------------------------------------------------------------#
197 | resource "aws_security_group" "rabbitmq" {
198 | name = "${local.project}-rabbitmq-sg"
199 | description = "Security group rules for ${local.project} RabbitMQ"
200 | vpc_id = aws_vpc.this.id
201 |
202 | ingress {
203 | description = "Allow all inbound traffic to RabbitMQ port from EC2"
204 | from_port = 5671
205 | to_port = 5671
206 | protocol = "tcp"
207 | security_groups = [aws_security_group.ec2.id]
208 | }
209 |
210 | tags = {
211 | Name = "${local.project}-rabbitmq-sg"
212 | }
213 | }
214 |
215 | # # ---------------------------------------------------------------------------------------------------------------------#
216 | # Create security group and rules for EFS
217 | # # ---------------------------------------------------------------------------------------------------------------------#
218 | resource "aws_security_group" "efs" {
219 | name = "${local.project}-efs-sg"
220 | description = "Security group rules for ${local.project} EFS"
221 | vpc_id = aws_vpc.this.id
222 |
223 | ingress {
224 | description = "Allow all inbound traffic to EFS port from EC2"
225 | from_port = 0
226 | to_port = 0
227 | protocol = "-1"
228 | security_groups = [aws_security_group.ec2.id]
229 | }
230 |
231 | egress {
232 | description = "Allow all outbound traffic to EC2 port from EFS"
233 | from_port = 0
234 | to_port = 0
235 | protocol = "-1"
236 | security_groups = [aws_security_group.ec2.id]
237 | }
238 |
239 | tags = {
240 | Name = "${local.project}-efs-sg"
241 | }
242 | }
243 |
244 | # # ---------------------------------------------------------------------------------------------------------------------#
245 | # Create security group and rules for opensearch
246 | # # ---------------------------------------------------------------------------------------------------------------------#
247 | resource "aws_security_group" "opensearch" {
248 | name = "${local.project}-opensearch-sg"
249 | description = "Security group rules for ${local.project} opensearch"
250 | vpc_id = aws_vpc.this.id
251 |
252 | ingress {
253 | description = "Allow all inbound traffic to opensearch port from EC2"
254 | from_port = 0
255 | to_port = 0
256 | protocol = "-1"
257 | security_groups = [aws_security_group.ec2.id]
258 | }
259 |
260 | egress {
261 | description = "Allow all outbound traffic to EC2 port from opensearch"
262 | from_port = 0
263 | to_port = 0
264 | protocol = "-1"
265 | security_groups = [aws_security_group.ec2.id]
266 | }
267 |
268 | tags = {
269 | Name = "${local.project}-opensearch-sg"
270 | }
271 | }
272 | # # ---------------------------------------------------------------------------------------------------------------------#
273 | # Create security group and rules for lambda
274 | # # ---------------------------------------------------------------------------------------------------------------------#
275 | resource "aws_security_group" "lambda" {
276 | vpc_id = aws_vpc.this.id
277 | name = "${local.project}-lambda-sg"
278 | description = "Security group rules for ${local.project} Lambda function access"
279 |
280 | egress {
281 | from_port = 0
282 | to_port = 0
283 | protocol = "-1"
284 | cidr_blocks = ["0.0.0.0/0"]
285 | }
286 | tags = {
287 | Name = "${local.project}-lambda-sg"
288 | }
289 | }
290 |
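The lambda security group only takes effect if the image optimization function is attached to this VPC, which the ec2:CreateNetworkInterface permissions in lambda.tf also anticipate; lambda.tf as written sets no vpc_config, and since the function lives in the us-east-1 provider, attachment as sketched further assumes the stack itself is deployed in us-east-1:

# Sketch: vpc_config block the function resource would need (assumed addition).
resource "aws_lambda_function" "image_optimization" {
  # ... existing arguments from lambda.tf ...
  vpc_config {
    subnet_ids         = [for subnet in aws_subnet.this : subnet.id]
    security_group_ids = [aws_security_group.lambda.id]
  }
}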
--------------------------------------------------------------------------------
/ses.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ////////////////////////////////////////////////////[ AMAZON SIMPLE EMAIL SERVICE ]///////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SES user credentials and a Configuration Set to stream SES metrics to CloudWatch
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_iam_user" "ses_smtp_user" {
10 | name = "${local.project}-ses-smtp-user"
11 | }
12 |
13 | resource "aws_ses_email_identity" "ses_email_identity" {
14 | email = var.admin_email
15 | }
16 |
17 | resource "aws_iam_user_policy" "ses_smtp_user_policy" {
18 | name = "${local.project}-ses-smtp-user-policy"
19 | user = aws_iam_user.ses_smtp_user.name
20 |
21 | policy = jsonencode({
22 | Version : "2012-10-17",
23 | Statement : [
24 | {
25 | Effect : "Allow",
26 | Action : [
27 | "ses:SendEmail",
28 | "ses:SendRawEmail"
29 | ],
30 | Resource : "*"
31 | }
32 | ]
33 | })
34 | }
35 |
36 | resource "aws_iam_access_key" "ses_smtp_user_access_key" {
37 | user = aws_iam_user.ses_smtp_user.name
38 | }
39 |
40 | resource "aws_ses_configuration_set" "this" {
41 | name = "${local.project}-ses-events"
42 | reputation_metrics_enabled = true
43 | delivery_options {
44 | tls_policy = "Require"
45 | }
46 | }
47 |
48 | resource "aws_ses_event_destination" "cloudwatch" {
49 | name = "${local.project}-ses-event-destination-cloudwatch"
50 | configuration_set_name = aws_ses_configuration_set.this.name
51 | enabled = true
52 | matching_types = ["bounce", "send", "complaint", "delivery"]
53 |
54 | cloudwatch_destination {
55 | default_value = "default"
56 | dimension_name = "dimension"
57 | value_source = "emailHeader"
58 | }
59 | }
60 |
61 |
62 |
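The IAM access key above is not an SMTP credential by itself; the AWS provider derives the SMTP password from the secret key and exposes it as an attribute. A minimal sketch for retrieving both values after apply (output names are assumptions):

# Sketch: SMTP credentials for the application mail transport.
output "ses_smtp_user" {
  value = aws_iam_access_key.ses_smtp_user_access_key.id
}

output "ses_smtp_password" {
  value     = aws_iam_access_key.ses_smtp_user_access_key.ses_smtp_password_v4
  sensitive = true
}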
--------------------------------------------------------------------------------
/sns.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ////////////////////////////////////////////////////[ SNS SUBSCRIPTION TOPIC ]////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SNS topic and email subscription (confirm email right after resource creation)
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_sns_topic" "default" {
10 | name = "${local.project}-email-alerts"
11 | }
12 | resource "aws_sns_topic_subscription" "default" {
13 | topic_arn = aws_sns_topic.default.arn
14 | protocol = "email"
15 | endpoint = var.admin_email
16 | }
17 |
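# A hedged sketch (not one of the module's actual alarms): this is how a
# CloudWatch alarm would notify the email subscription above; the alarm is a
# hypothetical example.
resource "aws_cloudwatch_metric_alarm" "example_cpu_high" {
  # illustrative alarm that publishes to the topic defined above
  alarm_name          = "${local.project}-example-cpu-high"
  namespace           = "AWS/EC2"
  metric_name         = "CPUUtilization"
  statistic           = "Average"
  comparison_operator = "GreaterThanThreshold"
  threshold           = 80
  period              = 300
  evaluation_periods  = 2
  alarm_actions       = [aws_sns_topic.default.arn]
}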
--------------------------------------------------------------------------------
/sqs.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////////[ SQS DEAD LETTER QUEUE ]////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SQS dead-letter queue to collect debug messages from failed events
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_sqs_queue" "dead_letter_queue" {
10 | name = "${local.project}-dead-letter-queue"
11 | delay_seconds = 5
12 | max_message_size = 262144
13 | message_retention_seconds = 1209600
14 | receive_wait_time_seconds = 5
15 | tags = {
16 | Name = "${local.project}-dead-letter-queue"
17 | }
18 | }
19 |
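# A hedged sketch (not in the original module): a source queue would route
# repeatedly failed messages to the dead-letter queue via a redrive policy;
# the queue name and maxReceiveCount below are illustrative assumptions.
resource "aws_sqs_queue" "example_source" {
  # hypothetical source queue; after 5 failed receives a message
  # moves to the dead-letter queue defined above
  name = "${local.project}-example-queue"
  redrive_policy = jsonencode({
    deadLetterTargetArn = aws_sqs_queue.dead_letter_queue.arn
    maxReceiveCount     = 5
  })
}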
--------------------------------------------------------------------------------
/ssm_cloudmap_deregister.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ////////////////////////////////////////[ SYSTEM MANAGER DOCUMENT CLOUDMAP DEREGISTER ]///////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SSM Document to deregister EC2 instances from the CloudMap service
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_ssm_document" "cloudmap_deregister" {
10 | name = "CloudMapDeregister"
11 | document_format = "YAML"
12 | document_type = "Automation"
13 | content = <<EOF
--------------------------------------------------------------------------------
/ssm_configuration.tf:
--------------------------------------------------------------------------------
62 | sudo /root/.local/bin/ansible localhost -m ping > /dev/null 2>&1 && echo "SUCCESS: Ansible ping worked!" || { echo "ERROR: Ansible ping failed!"; exit 1; }
63 | INSTANCE_NAME=$(metadata tags/instance/InstanceName)
64 | INSTANCE_IP=$(metadata local-ipv4)
65 | SETUP_DIRECTORY="/opt/${var.brand}/setup"
66 | INSTANCE_DIRECTORY="$${SETUP_DIRECTORY}/$${INSTANCE_NAME}"
67 | mkdir -p "$${INSTANCE_DIRECTORY}"
68 | touch $${SETUP_DIRECTORY}/init
69 | S3_OPTIONS="--quiet --exact-timestamps --delete"
70 | sudo /root/awscli/bin/aws s3 sync "s3://${aws_s3_bucket.this["system"].bucket}/setup/$${INSTANCE_NAME}" "$${INSTANCE_DIRECTORY}" $${S3_OPTIONS}
71 | find $${INSTANCE_DIRECTORY}/ -type f -name '*.y*ml' -delete
72 | cd $${INSTANCE_DIRECTORY}/
73 | unzip -o $${INSTANCE_NAME}.zip
74 | sudo /root/.local/bin/ansible-playbook -i localhost -c local -e "SSM=True instance_name=$${INSTANCE_NAME} instance_ip=$${INSTANCE_IP}" -v $${INSTANCE_DIRECTORY}/$${INSTANCE_NAME}.yml
75 | CloudWatchOutputConfig:
76 | CloudWatchLogGroupName: "${local.project}-InstanceConfiguration"
77 | CloudWatchOutputEnabled: true
78 | - name: "SendExecutionLog"
79 | action: "aws:executeAwsApi"
80 | isEnd: true
81 | inputs:
82 | Service: "sns"
83 | Api: "Publish"
84 | TopicArn: "${aws_sns_topic.default.arn}"
85 | Subject: "Instance configuration for ${local.project}"
86 | Message: "Instance {{ ExtractInstanceName.InstanceName }} {{ FilterInstancesByNameTag.InstanceIds }} configuration {{ automation:EXECUTION_ID }} completed at {{ global:DATE_TIME }}"
87 | EOF
88 | }
89 |
--------------------------------------------------------------------------------
/ssm_cwa_config.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | /////////////////////////////////////////////////////////[ SYSTEMS MANAGER ]//////////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SSM Parameter configuration file for CloudWatch Agent
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_ssm_parameter" "cloudwatch_agent_config" {
10 | for_each = var.ec2
11 | name = "/cloudwatch-agent/amazon-cloudwatch-agent-${each.key}.json"
12 | description = "Configuration file for CloudWatch agent at ${each.key} for ${local.project}"
13 | type = "String"
14 | value = <<EOF
--------------------------------------------------------------------------------
/ssm_initialization.tf:
--------------------------------------------------------------------------------
79 | cat <<'END' > /usr/local/bin/parameterstore
80 | #!/bin/bash
81 | parameterstore() {
82 | local KEY=$1
83 | local PARAMETER_NAME="/${local.project}/${local.environment}/$${KEY}"
84 | sudo /root/awscli/bin/aws ssm get-parameter --name "$${PARAMETER_NAME}" --with-decryption --query 'Parameter.Value' --output text
85 | }
86 | if [ "$#" -eq 0 ]; then
87 | echo "Usage: $0 <KEY>"
88 | echo "Example: $0 BRAND"
89 | exit 1
90 | fi
91 | KEY=$1
92 | parameterstore "$${KEY}"
93 | END
94 | chmod +x /usr/local/bin/parameterstore
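# usage example (illustrative): DOMAIN="$(parameterstore DOMAIN)"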
95 | ### EC2 Metadata Request Script
96 | cat <<'END' > /usr/local/bin/metadata
97 | #!/bin/bash
98 | METADATA_URL="http://169.254.169.254/latest"
99 | metadata() {
100 | local FIELD=$1
101 | TOKEN=$(curl -sSf -X PUT "$${METADATA_URL}/api/token" \
102 | -H "X-aws-ec2-metadata-token-ttl-seconds: 300") || {
103 | echo "Error: Unable to fetch token. Ensure IMDSv2 is enabled." >&2
104 | exit 1
105 | }
106 | curl -sSf -X GET "$${METADATA_URL}/meta-data/$${FIELD}" \
107 | -H "X-aws-ec2-metadata-token: $${TOKEN}" || {
108 | echo "Error: Unable to fetch metadata for field $${FIELD}." >&2
109 | exit 1
110 | }
111 | }
112 | if [ "$#" -eq 0 ]; then
113 | echo "Usage: $0 <FIELD>"
114 | echo "Example: $0 instance-id"
115 | exit 1
116 | fi
117 | FIELD=$1
118 | metadata "$${FIELD}"
119 | END
120 | chmod +x /usr/local/bin/metadata
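# usage examples: metadata instance-id ; metadata tags/instance/InstanceName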
121 | ### Write leader instance script
122 | cat <<'END' > /usr/local/bin/leader
123 | INSTANCE_ID=$(metadata instance-id)
124 | LEADER_INSTANCE_ID=$(sudo /root/awscli/bin/aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names ${aws_autoscaling_group.this["frontend"].name} --region ${data.aws_region.current.name} --output json | \
125 | jq -r '.AutoScalingGroups[].Instances[] | select(.LifecycleState=="InService") | .InstanceId' | sort | head -1)
126 | [ "$${LEADER_INSTANCE_ID}" = "$${INSTANCE_ID}" ]
127 | END
128 | chmod +x /usr/local/bin/leader
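# leader exits 0 only on the in-service instance with the lowest instance id,
# so one-off tasks can be guarded with it, e.g. (illustrative): leader && run-one-off-task
# note: the script has no shebang and is interpreted by the caller's shell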
129 | Targets:
130 | - Key: "InstanceIds"
131 | Values:
132 | - "{{ InstanceIds }}"
133 | CloudWatchOutputConfig:
134 | CloudWatchOutputEnabled: true
135 | - name: "InstallBasePackages"
136 | action: "aws:runCommand"
137 | inputs:
138 | DocumentName: "AWS-RunShellScript"
139 | Parameters:
140 | commands:
141 | - |-
142 | #!/bin/bash
143 | apt-get -qqy update
144 | apt-get -qqy install jq apt-transport-https lsb-release ca-certificates curl gnupg software-properties-common snmp syslog-ng-core unzip pipx
145 | # install awscli v2
146 | mkdir /tmp/awscli
147 | cd /tmp/awscli
148 | curl "https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" -o "awscliv2.zip"
149 | unzip awscliv2.zip
150 | bash ./aws/install
151 | sudo ./aws/install --bin-dir /root/awscli/bin --install-dir /root/awscli/awscli --update
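# the second install places a self-contained copy under /root/awscli/bin,
# the explicit path referenced by the other SSM scripts in this module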
152 | INSTANCE_NAME="$(metadata tags/instance/InstanceName)"
153 | hostnamectl set-hostname $${INSTANCE_NAME}.${var.brand}.internal
154 | if [ "$${INSTANCE_NAME}" = "frontend" ]; then
155 | apt-get -qqy install ruby
156 | cd /tmp
157 | wget https://aws-codedeploy-${data.aws_region.current.name}.s3.amazonaws.com/latest/install
158 | chmod +x ./install
159 | ./install auto
160 | fi
161 | cd /tmp
162 | wget https://amazoncloudwatch-agent.s3.amazonaws.com/debian/arm64/latest/amazon-cloudwatch-agent.deb
163 | dpkg -i amazon-cloudwatch-agent.deb
164 | /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c ssm:/cloudwatch-agent/amazon-cloudwatch-agent-$${INSTANCE_NAME}.json
165 | Targets:
166 | - Key: "InstanceIds"
167 | Values:
168 | - "{{ InstanceIds }}"
169 | CloudWatchOutputConfig:
170 | CloudWatchOutputEnabled: true
171 | - name: "InstanceConfiguration"
172 | action: "aws:runCommand"
173 | nextStep: "GetCloudMapServiceIdFromInstanceTag"
174 | isCritical: true
175 | isEnd: false
176 | onFailure: Abort
177 | inputs:
178 | DocumentName: "AWS-RunShellScript"
179 | InstanceIds:
180 | - "{{ InstanceIds }}"
181 | Parameters:
182 | commands:
183 | - |-
184 | #!/bin/bash
185 | sudo pipx ensurepath
186 | export PATH="$PATH:/root/.local/bin"
187 | sudo pipx install ansible-core
188 | sudo /root/.local/bin/ansible localhost -m ping > /dev/null 2>&1 && echo "SUCCESS: Ansible ping worked!" || { echo "ERROR: Ansible ping failed!"; exit 1; }
189 | INSTANCE_NAME=$(metadata tags/instance/InstanceName)
190 | INSTANCE_IP=$(metadata local-ipv4)
191 | SETUP_DIRECTORY="/opt/${var.brand}/setup"
192 | INSTANCE_DIRECTORY="$${SETUP_DIRECTORY}/$${INSTANCE_NAME}"
193 | mkdir -p "$${INSTANCE_DIRECTORY}"
194 | touch $${SETUP_DIRECTORY}/init
195 | S3_OPTIONS="--quiet --exact-timestamps --delete"
196 | sudo /root/awscli/bin/aws s3 sync "s3://${aws_s3_bucket.this["system"].bucket}/setup/$${INSTANCE_NAME}" "$${INSTANCE_DIRECTORY}" $${S3_OPTIONS}
197 | find $${INSTANCE_DIRECTORY}/ -type f -name '*.y*ml' -delete
198 | cd $${INSTANCE_DIRECTORY}/
199 | unzip -o $${INSTANCE_NAME}.zip
200 | sudo /root/.local/bin/ansible-playbook -i localhost -c local -e "SSM=True instance_name=$${INSTANCE_NAME} instance_ip=$${INSTANCE_IP}" -v $${INSTANCE_DIRECTORY}/$${INSTANCE_NAME}.yml
201 | CloudWatchOutputConfig:
202 | CloudWatchLogGroupName: "${local.project}-InstanceConfiguration"
203 | CloudWatchOutputEnabled: true
204 | - name: "GetCloudMapServiceIdFromInstanceTag"
205 | action: "aws:executeAwsApi"
206 | inputs:
207 | Service: "ec2"
208 | Api: "DescribeTags"
209 | Filters:
210 | - Name: "resource-id"
211 | Values:
212 | - "{{ InstanceIds }}"
213 | - Name: "key"
214 | Values:
215 | - "CloudmapId"
216 | outputs:
217 | - Name: "CloudMapServiceId"
218 | Selector: "$.Tags[0].Value"
219 | Type: "String"
220 | - name: "GetInstancePrivateIp"
221 | action: "aws:executeAwsApi"
222 | inputs:
223 | Service: "ec2"
224 | Api: "DescribeInstances"
225 | InstanceIds:
226 | - "{{ InstanceIds }}"
227 | outputs:
228 | - Name: "PrivateIp"
229 | Selector: "$.Reservations[0].Instances[0].PrivateIpAddress"
230 | Type: "String"
231 | - name: "RegisterInstanceInCloudMap"
232 | action: "aws:executeAwsApi"
233 | inputs:
234 | Service: "servicediscovery"
235 | Api: "RegisterInstance"
236 | ServiceId: "{{ GetCloudMapServiceIdFromInstanceTag.CloudMapServiceId }}"
237 | InstanceId: "{{ InstanceIds }}"
238 | Attributes:
239 | AWS_INSTANCE_IPV4: "{{ GetInstancePrivateIp.PrivateIp }}"
240 | outputs:
241 | - Name: "OperationId"
242 | Selector: "$.OperationId"
243 | Type: "String"
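# the next three steps poll the CloudMap registration operation until it
# reports SUCCESS, abort the automation if it does not, and capture the
# final status for the SNS notification below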
244 | - name: "WaitCloudMapOperationStatus"
245 | action: aws:waitForAwsResourceProperty
246 | timeoutSeconds: 60
247 | inputs:
248 | Service: "servicediscovery"
249 | Api: "GetOperation"
250 | OperationId: "{{ RegisterInstanceInCloudMap.OperationId }}"
251 | PropertySelector: "$.Operation.Status"
252 | DesiredValues:
253 | - SUCCESS
254 | - name: "AssertCloudMapOperationStatus"
255 | action: aws:assertAwsResourceProperty
256 | inputs:
257 | Service: "servicediscovery"
258 | Api: "GetOperation"
259 | OperationId: "{{ RegisterInstanceInCloudMap.OperationId }}"
260 | PropertySelector: "$.Operation.Status"
261 | DesiredValues:
262 | - SUCCESS
263 | - name: "GetCloudMapOperationStatus"
264 | action: "aws:executeAwsApi"
265 | inputs:
266 | Service: "servicediscovery"
267 | Api: "GetOperation"
268 | OperationId: "{{ RegisterInstanceInCloudMap.OperationId }}"
269 | outputs:
270 | - Name: "OperationStatus"
271 | Selector: "$.Operation.Status"
272 | Type: "String"
273 | - name: "SendExecutionLog"
274 | action: "aws:executeAwsApi"
275 | isEnd: true
276 | inputs:
277 | Service: "sns"
278 | Api: "Publish"
279 | TopicArn: "${aws_sns_topic.default.arn}"
280 | Subject: "Instance {{ InstanceIds }} initialization for ${local.project}"
281 | Message: "Instance {{ InstanceIds }} with ip {{ GetInstancePrivateIp.PrivateIp }} initialization {{ automation:EXECUTION_ID }} completed at {{ global:DATE_TIME }} CloudMap status: {{ GetCloudMapOperationStatus.OperationStatus }}"
282 | EOF
283 | }
284 |
--------------------------------------------------------------------------------
/ssm_parameterstore.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ///////////////////////////////////////////////[ SYSTEMS MANAGER - PARAMETERSTORE ]///////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SSM Parameter Store entries for the AWS environment
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_ssm_parameter" "aws_env" {
10 | for_each = local.parameters
11 | name = "/${local.project}/${local.environment}/${each.key}"
12 | description = "Environment variable value for ${each.key}"
13 | type = "String"
14 | value = each.value
15 | tags = {
16 | Name = "${local.project}-${local.environment}-${each.key}"
17 | }
18 | }
19 | locals {
20 | parameters = {
21 | PROJECT = local.project
22 | ENVIRONMENT = local.environment
23 | AWS_DEFAULT_REGION = data.aws_region.current.name
24 | VPC_ID = aws_vpc.this.id
25 | CIDR = aws_vpc.this.cidr_block
26 | SUBNET_ID = join(",", random_shuffle.subnets.result)
27 | EC2_SECURITY_GROUP = aws_security_group.ec2.id
28 | SOURCE_AMI = data.aws_ami.distro.id
29 | EFS_SYSTEM_ID = aws_efs_file_system.this.id
30 | EFS_ACCESS_POINT_VAR = aws_efs_access_point.this["var"].id
31 | EFS_ACCESS_POINT_MEDIA = aws_efs_access_point.this["media"].id
32 | SNS_TOPIC_ARN = aws_sns_topic.default.arn
33 | FRONTEND_CLOUDMAP_SERVICE_ID = aws_service_discovery_service.this["frontend"].id
34 | VARNISH_CLOUDMAP_SERVICE_ID = aws_service_discovery_service.this["varnish"].id
35 | RABBITMQ_ENDPOINT = trimsuffix(trimprefix(aws_mq_broker.this.instances[0].endpoints[0], "amqps://"), ":5671")
36 | RABBITMQ_USER = var.brand
37 | RABBITMQ_PASSWORD = random_password.this["rabbitmq"].result
38 | OPENSEARCH_ENDPOINT = "https://${aws_opensearch_domain.this.endpoint}:443"
39 | OPENSEARCH_ADMIN = random_string.this["opensearch"].result
40 | OPENSEARCH_PASSWORD = random_password.this["opensearch"].result
41 | REDIS_CACHE_BACKEND = aws_elasticache_replication_group.this["cache"].primary_endpoint_address
42 | REDIS_SESSION_BACKEND = aws_elasticache_replication_group.this["session"].primary_endpoint_address
43 | REDIS_CACHE_BACKEND_RO = aws_elasticache_replication_group.this["cache"].reader_endpoint_address
44 | REDIS_SESSION_BACKEND_RO = aws_elasticache_replication_group.this["session"].reader_endpoint_address
45 | REDIS_PASSWORD = random_password.this["redis"].result
46 | S3_MEDIA_BUCKET = aws_s3_bucket.this["media"].bucket
47 | S3_SYSTEM_BUCKET = aws_s3_bucket.this["system"].bucket
48 | S3_MEDIA_BUCKET_URL = aws_s3_bucket.this["media"].bucket_regional_domain_name
49 | ALB_DNS_NAME = aws_lb.this.dns_name
50 | CLOUDFRONT_DOMAIN = aws_cloudfront_distribution.this.domain_name
51 | SES_KEY = aws_iam_access_key.ses_smtp_user_access_key.id
52 | SES_SECRET = aws_iam_access_key.ses_smtp_user_access_key.secret
53 | SES_PASSWORD = aws_iam_access_key.ses_smtp_user_access_key.ses_smtp_password_v4
54 | SES_ENDPOINT = "email-smtp.${data.aws_region.current.name}.amazonaws.com"
55 | MARIADB_ENDPOINT = aws_db_instance.this.endpoint
56 | MARIADB_NAME = aws_db_instance.this.identifier
57 | MARIADB_USER = aws_db_instance.this.username
58 | MARIADB_PASSWORD = random_password.this["database"].result
59 | ADMIN_PATH = "admin_${random_string.this["admin_path"].result}"
60 | DOMAIN = var.domain
61 | BRAND = var.brand
62 | PHP_USER = "php-${var.brand}"
63 | ADMIN_EMAIL = var.admin_email
64 | WEB_ROOT_PATH = "/home/${var.brand}/public_html"
65 | SECURITY_HEADER = random_uuid.this.result
66 | HEALTH_CHECK_LOCATION = random_string.this["health_check"].result
67 | RESOLVER = cidrhost(aws_vpc.this.cidr_block, 2)
68 | HTTP_X_HEADER = random_uuid.this.result
69 | }
70 | }
71 |
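# A hedged sketch (not in the original module): any value published above can be
# read back with the aws_ssm_parameter data source; the name "domain" is an
# illustrative assumption.
data "aws_ssm_parameter" "domain" {
  # reads the DOMAIN value written by aws_ssm_parameter.aws_env
  name = "/${local.project}/${local.environment}/DOMAIN"
}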
--------------------------------------------------------------------------------
/ssm_release.tf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | //////////////////////////////////////////////[ SYSTEM MANAGER DOCUMENT RELEASE ]/////////////////////////////////////////
5 |
6 | # # ---------------------------------------------------------------------------------------------------------------------#
7 | # Create SSM Document to check for and deploy the latest release from S3 to EC2
8 | # # ---------------------------------------------------------------------------------------------------------------------#
9 | resource "aws_ssm_document" "release" {
10 | name = "LatestReleaseDeployment"
11 | document_type = "Automation"
12 | document_format = "YAML"
13 | content = <<EOF