├── .gitignore ├── CICD ├── aws-config.sh ├── buildspec.yml └── install-terraform.sh ├── architecture.gif ├── main ├── backend.tf ├── main.tf ├── providers.tf ├── terraform.tfvars └── variables.tf ├── module ├── ASG │ ├── main.tf │ ├── output.tf │ └── variables.tf ├── EC2 │ ├── config.sh │ ├── main.tf │ ├── output.tf │ └── variables.tf ├── IAM │ ├── main.tf │ ├── output.tf │ └── variables.tf ├── KEY │ ├── main.tf │ ├── output.tf │ └── variables.tf ├── LB │ ├── main.tf │ ├── output.tf │ └── variables.tf ├── SG │ ├── main.tf │ ├── output.tf │ └── variables.tf └── VPC │ ├── main.tf │ ├── output.tf │ └── variables.tf └── readme.md /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | main/.terraform* 3 | main/terraform 4 | 5 | module/KEY/controller* 6 | 7 | -------------------------------------------------------------------------------- /CICD/aws-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # fail on any error 4 | set -eu 5 | 6 | # configure named profile 7 | aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID --profile $PROFILE_NAME 8 | aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY --profile $PROFILE_NAME 9 | aws configure set region $AWS_REGION --profile $PROFILE_NAME 10 | 11 | # verify that profile is configured 12 | aws configure list --profile $PROFILE_NAME -------------------------------------------------------------------------------- /CICD/buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | phases: 3 | install: 4 | commands: 5 | - cd CICD 6 | - chmod +x aws-config.sh install-terraform.sh 7 | 8 | pre_build: 9 | commands: 10 | - ./aws-config.sh 11 | - ./install-terraform.sh 12 | 13 | build: 14 | commands: 15 | - cd .. 
16 | - cd main 17 | - terraform init 18 | - terraform validate 19 | - terraform apply -auto-approve 20 | # - terraform destroy -auto-approve 21 | 22 | -------------------------------------------------------------------------------- /CICD/install-terraform.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # fail on any error 4 | set -eu 5 | 6 | # install yum-config-manager to manage your repositories 7 | sudo yum install -y yum-utils 8 | 9 | # use yum-config-manager to add the official HashiCorp Linux repository 10 | sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo 11 | 12 | # install terraform 13 | sudo yum -y install terraform 14 | 15 | # verify terraform is installed 16 | terraform --version 17 | 18 | -------------------------------------------------------------------------------- /architecture.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AnkitJodhani/Terraform-Production-ready-mongodb-project6/423a26914c3bbf6f710572aa3d79c3ef04ddcf4f/architecture.gif -------------------------------------------------------------------------------- /main/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | region = "us-east-1" 4 | bucket = "learning-terraform" 5 | key = "backend/nginx-mongodb-cluster.tfstate" 6 | dynamodb_table = "dynamoDB-state-locking" 7 | } 8 | } -------------------------------------------------------------------------------- /main/main.tf: -------------------------------------------------------------------------------- 1 | 2 | # creating VPC 3 | 4 | module "VPC" { 5 | source = "../module/VPC" 6 | REGION = var.REGION 7 | PROJECT_NAME = var.PROJECT_NAME 8 | VPC_CIDR = var.VPC_CIDR 9 | PUB_SUB_1_A_CIDR = var.PUB_SUB_1_A_CIDR 10 | PUB_SUB_2_B_CIDR = var.PUB_SUB_2_B_CIDR 11 | PUB_SUB_3_C_CIDR = 
var.PUB_SUB_3_C_CIDR 12 | PRI_SUB_4_A_CIDR = var.PRI_SUB_4_A_CIDR 13 | PRI_SUB_5_B_CIDR = var.PRI_SUB_5_B_CIDR 14 | PRI_SUB_6_C_CIDR = var.PRI_SUB_6_C_CIDR 15 | PRI_SUB_7_A_CIDR = var.PRI_SUB_7_A_CIDR 16 | PRI_SUB_8_B_CIDR = var.PRI_SUB_8_B_CIDR 17 | PRI_SUB_9_C_CIDR = var.PRI_SUB_9_C_CIDR 18 | } 19 | 20 | 21 | module "SG" { 22 | source = "../module/SG" 23 | VPC_ID = module.VPC.VPC_ID 24 | } 25 | 26 | 27 | # createing Key for instaces 28 | module "KEY" { 29 | source = "../module/KEY" 30 | } 31 | 32 | # create IAM role and that will be attach to the control machine 33 | module "IAM" { 34 | source = "../module/IAM" 35 | } 36 | 37 | module "EC2" { 38 | source = "../module/EC2" 39 | VPC_ID = module.VPC.VPC_ID 40 | CPU = var.CPU 41 | AMI = var.AMI 42 | KEY_NAME = module.KEY.KEY_NAME 43 | PRI_SUB_7_A_ID = module.VPC.PRI_SUB_7_A_ID 44 | PRI_SUB_8_B_ID = module.VPC.PRI_SUB_8_B_ID 45 | PRI_SUB_9_C_ID = module.VPC.PRI_SUB_9_C_ID 46 | MONGODB_CLUSTER_SG_ID = module.SG.MONGODB_CLUSTER_SG_ID 47 | CONTROL_MACHINE_SG_ID = module.SG.CONTROL_MACHINE_SG_ID 48 | ROLE_NAME = module.IAM.ROLE_NAME 49 | } 50 | 51 | 52 | module "LB" { 53 | source = "../module/LB" 54 | 55 | VPC_ID = module.VPC.VPC_ID 56 | PROJECT_NAME = module.VPC.PROJECT_NAME 57 | PUB_SUB_1_A_ID = module.VPC.PUB_SUB_1_A_ID 58 | PUB_SUB_2_B_ID = module.VPC.PUB_SUB_2_B_ID 59 | PUB_SUB_3_C_ID = module.VPC.PUB_SUB_3_C_ID 60 | ALB_SG_ID = module.SG.ALB_SG_ID 61 | NLB_SG_ID = module.SG.NLB_SG_ID 62 | MONGOS_1_ID = module.EC2.MONGOS_1_ID 63 | MONGOS_2_ID = module.EC2.MONGOS_2_ID 64 | } 65 | 66 | 67 | module "ASG" { 68 | source = "../module/ASG" 69 | CPU = var.CPU 70 | AMI = var.AMI 71 | PROJECT_NAME = module.VPC.PROJECT_NAME 72 | KEY_NAME = module.KEY.KEY_NAME 73 | NGINX_SG_ID = module.SG.NGINX_SG_ID 74 | MAX_SIZE = var.MAX_SIZE 75 | MIN_SIZE = var.MIN_SIZE 76 | DESIRED_CAP = var.DESIRED_CAP 77 | PRI_SUB_4_A_ID = module.VPC.PRI_SUB_4_A_ID 78 | PRI_SUB_5_B_ID = module.VPC.PRI_SUB_5_B_ID 79 | PRI_SUB_6_C_ID = 
module.VPC.PRI_SUB_6_C_ID 80 | NGINX_TG_ARN = module.LB.NGINX_TG_ARN 81 | 82 | 83 | } 84 | -------------------------------------------------------------------------------- /main/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.67.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | # Configuration options 12 | region = var.REGION 13 | profile = "myTerraform" 14 | } -------------------------------------------------------------------------------- /main/terraform.tfvars: -------------------------------------------------------------------------------- 1 | REGION = "us-east-1" 2 | PROJECT_NAME = "nginx-mongodb-cluster" 3 | CPU = "t2.micro" 4 | AMI = "ami-053b0d53c279acc90" 5 | MAX_SIZE = "3" 6 | MIN_SIZE = "3" 7 | DESIRED_CAP = "3" 8 | VPC_CIDR = "172.20.0.0/16" 9 | PUB_SUB_1_A_CIDR = "172.20.1.0/24" 10 | PUB_SUB_2_B_CIDR = "172.20.2.0/24" 11 | PUB_SUB_3_C_CIDR = "172.20.3.0/24" 12 | PRI_SUB_4_A_CIDR = "172.20.4.0/24" 13 | PRI_SUB_5_B_CIDR = "172.20.5.0/24" 14 | PRI_SUB_6_C_CIDR = "172.20.6.0/24" 15 | PRI_SUB_7_A_CIDR = "172.20.7.0/24" 16 | PRI_SUB_8_B_CIDR = "172.20.8.0/24" 17 | PRI_SUB_9_C_CIDR = "172.20.9.0/24" 18 | -------------------------------------------------------------------------------- /main/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | variable "REGION" {} 3 | variable "PROJECT_NAME" {} 4 | variable "CPU" {} 5 | variable "AMI" {} 6 | variable "MAX_SIZE" {} 7 | variable "MIN_SIZE" {} 8 | variable "DESIRED_CAP" {} 9 | variable "VPC_CIDR" {} 10 | variable "PUB_SUB_1_A_CIDR" {} 11 | variable "PUB_SUB_2_B_CIDR" {} 12 | variable "PUB_SUB_3_C_CIDR" {} 13 | variable "PRI_SUB_4_A_CIDR" {} 14 | variable "PRI_SUB_5_B_CIDR" {} 15 | variable "PRI_SUB_6_C_CIDR" {} 16 | variable "PRI_SUB_7_A_CIDR" {} 17 | variable "PRI_SUB_8_B_CIDR" {} 18 | variable "PRI_SUB_9_C_CIDR" {} 19 
| 20 | 21 | -------------------------------------------------------------------------------- /module/ASG/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_launch_template" "this" { 2 | name = "${var.PROJECT_NAME}-tpl" 3 | image_id = var.AMI 4 | instance_type = var.CPU 5 | key_name = var.KEY_NAME 6 | 7 | 8 | # network_interfaces { 9 | # associate_public_ip_address = false 10 | # } 11 | vpc_security_group_ids = [var.NGINX_SG_ID] 12 | tags = { 13 | Name = "${var.PROJECT_NAME}-tpl" 14 | } 15 | } 16 | 17 | resource "aws_autoscaling_group" "this" { 18 | 19 | name = "${var.PROJECT_NAME}-asg" 20 | max_size = var.MAX_SIZE 21 | min_size = var.MIN_SIZE 22 | desired_capacity = var.DESIRED_CAP 23 | health_check_grace_period = 300 24 | health_check_type = "EC2" #"ELB" or default EC2 25 | #availability_zones = var.availability_zones #["us-east-1a"] 26 | vpc_zone_identifier = [var.PRI_SUB_4_A_ID,var.PRI_SUB_5_B_ID,var.PRI_SUB_6_C_ID] 27 | target_group_arns = [var.NGINX_TG_ARN] #var.target_group_arns 28 | 29 | enabled_metrics = [ 30 | "GroupMinSize", 31 | "GroupMaxSize", 32 | "GroupDesiredCapacity", 33 | "GroupInServiceInstances", 34 | "GroupTotalInstances" 35 | ] 36 | 37 | metrics_granularity = "1Minute" 38 | 39 | launch_template { 40 | id = aws_launch_template.this.id 41 | version = aws_launch_template.this.latest_version #"$Latest" 42 | } 43 | # depends_on = [module.aws_lb] 44 | } 45 | 46 | # scale up policy 47 | resource "aws_autoscaling_policy" "scale_up" { 48 | name = "${var.PROJECT_NAME}-asg-scale-up" 49 | autoscaling_group_name = aws_autoscaling_group.this.name 50 | adjustment_type = "ChangeInCapacity" 51 | scaling_adjustment = "1" #increasing instance by 1 52 | cooldown = "300" 53 | policy_type = "SimpleScaling" 54 | } 55 | 56 | # scale up alarm 57 | # alarm will trigger the ASG policy (scale/down) based on the metric (CPUUtilization), comparison_operator, threshold 58 | resource "aws_cloudwatch_metric_alarm" 
"scale_up_alarm" { 59 | alarm_name = "${var.PROJECT_NAME}-asg-scale-up-alarm" 60 | alarm_description = "asg-scale-up-cpu-alarm" 61 | comparison_operator = "GreaterThanOrEqualToThreshold" 62 | evaluation_periods = "2" 63 | metric_name = "CPUUtilization" 64 | namespace = "AWS/EC2" 65 | period = "120" 66 | statistic = "Average" 67 | threshold = "30" # New instance will be created once CPU utilization is higher than 30 % 68 | dimensions = { 69 | "AutoScalingGroupName" = aws_autoscaling_group.this.name 70 | } 71 | actions_enabled = true 72 | alarm_actions = [aws_autoscaling_policy.scale_up.arn] 73 | } 74 | 75 | # scale down policy 76 | resource "aws_autoscaling_policy" "scale_down" { 77 | name = "${var.PROJECT_NAME}-asg-scale-down" 78 | autoscaling_group_name = aws_autoscaling_group.this.name 79 | adjustment_type = "ChangeInCapacity" 80 | scaling_adjustment = "-1" # decreasing instance by 1 81 | cooldown = "300" 82 | policy_type = "SimpleScaling" 83 | } 84 | 85 | # scale down alarm 86 | resource "aws_cloudwatch_metric_alarm" "scale_down_alarm" { 87 | alarm_name = "${var.PROJECT_NAME}-asg-scale-down-alarm" 88 | alarm_description = "asg-scale-down-cpu-alarm" 89 | comparison_operator = "LessThanOrEqualToThreshold" 90 | evaluation_periods = "2" 91 | metric_name = "CPUUtilization" 92 | namespace = "AWS/EC2" 93 | period = "120" 94 | statistic = "Average" 95 | threshold = "5" # Instance will scale down when CPU utilization is lower than 5 % 96 | dimensions = { 97 | "AutoScalingGroupName" = aws_autoscaling_group.this.name 98 | } 99 | actions_enabled = true 100 | alarm_actions = [aws_autoscaling_policy.scale_down.arn] 101 | } 102 | -------------------------------------------------------------------------------- /module/ASG/output.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AnkitJodhani/Terraform-Production-ready-mongodb-project6/423a26914c3bbf6f710572aa3d79c3ef04ddcf4f/module/ASG/output.tf 
-------------------------------------------------------------------------------- /module/ASG/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | variable "PROJECT_NAME" {} 4 | 5 | variable "AMI" {} 6 | 7 | variable "CPU" {} 8 | 9 | variable "KEY_NAME" {} 10 | 11 | variable "NGINX_SG_ID" {} 12 | 13 | variable "MAX_SIZE" {} 14 | 15 | variable "MIN_SIZE" {} 16 | 17 | variable "DESIRED_CAP" {} 18 | 19 | variable "PRI_SUB_4_A_ID" {} 20 | variable "PRI_SUB_5_B_ID" {} 21 | variable "PRI_SUB_6_C_ID" {} 22 | 23 | variable "NGINX_TG_ARN" {} 24 | 25 | -------------------------------------------------------------------------------- /module/EC2/config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt update -y 4 | 5 | sudo apt install software-properties-common -y 6 | 7 | sudo add-apt-repository --yes --update ppa:ansible/ansible 8 | 9 | sudo apt install ansible -y 10 | 11 | sudo apt install jq -y 12 | 13 | sudo apt install zip unzip -y 14 | 15 | sudo apt update -y 16 | 17 | sudo apt install openssl -y 18 | 19 | sudo apt update -y 20 | 21 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 22 | 23 | unzip awscliv2.zip 24 | 25 | sudo ./aws/install 26 | 27 | sudo apt install python3-pip -y 28 | 29 | sudo apt update -y 30 | 31 | 32 | # This installs the CodeDeploy agent and its prerequisites on Ubuntu 22.04. 
33 | sudo apt-get update 34 | 35 | sudo apt-get install ruby-full ruby-webrick wget -y 36 | 37 | cd /tmp 38 | 39 | wget https://aws-codedeploy-us-east-1.s3.us-east-1.amazonaws.com/releases/codedeploy-agent_1.3.2-1902_all.deb 40 | 41 | mkdir codedeploy-agent_1.3.2-1902_ubuntu22 42 | 43 | dpkg-deb -R codedeploy-agent_1.3.2-1902_all.deb codedeploy-agent_1.3.2-1902_ubuntu22 44 | 45 | sed 's/Depends:.*/Depends:ruby3.0/' -i ./codedeploy-agent_1.3.2-1902_ubuntu22/DEBIAN/control 46 | 47 | dpkg-deb -b codedeploy-agent_1.3.2-1902_ubuntu22/ 48 | 49 | sudo dpkg -i codedeploy-agent_1.3.2-1902_ubuntu22.deb 50 | 51 | systemctl list-units --type=service | grep codedeploy 52 | 53 | sudo systemctl start codedeploy-agent 54 | 55 | # installing cloudwatch agent 56 | 57 | sudo apt update -y 58 | 59 | sudo apt install wget -y 60 | 61 | sudo wget https://amazoncloudwatch-agent.s3.us-east-1.amazonaws.com/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb 62 | 63 | sudo dpkg -i ./amazon-cloudwatch-agent.deb 64 | 65 | sudo apt update -y 66 | 67 | sudo apt update -y 68 | 69 | 70 | JSON_CONTENT='{ 71 | "agent": { 72 | "run_as_user": "cwagent" 73 | }, 74 | "logs": { 75 | "logs_collected": { 76 | "files": { 77 | "collect_list": [ 78 | { 79 | "file_path": "/opt/codedeploy-agent/deployment-root/deployment-logs/codedeploy-agent-deployments.log", 80 | "log_group_name": "codedeploy-agent-deployments.log", 81 | "log_stream_name": "{instance_id}", 82 | "retention_in_days": -1, 83 | "timestamp_format": "[%Y-%m-%d %H:%M:%S.%f]" 84 | } 85 | ] 86 | } 87 | } 88 | } 89 | }' 90 | 91 | 92 | cat < /opt/aws/amazon-cloudwatch-agent/bin/config.json 93 | $JSON_CONTENT 94 | EOF 95 | 96 | 97 | 98 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/bin/config.json -s 99 | 100 | 101 | sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/bin/config.json -s 102 
| 103 | 104 | -------------------------------------------------------------------------------- /module/EC2/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "servers" { 2 | ami = var.AMI 3 | instance_type = var.CPU 4 | vpc_security_group_ids = [var.MONGODB_CLUSTER_SG_ID] 5 | key_name = var.KEY_NAME 6 | for_each = local.data 7 | subnet_id = each.value.sub_id 8 | tags = { 9 | Name = each.value.name 10 | } 11 | } 12 | 13 | # Create an IAM instance profile 14 | resource "aws_iam_instance_profile" "control_machine_profile" { 15 | name = "control-machine-role-profile" 16 | role = var.ROLE_NAME 17 | } 18 | 19 | resource "aws_instance" "control-machine" { 20 | ami = var.AMI 21 | instance_type = var.CPU 22 | vpc_security_group_ids = [var.CONTROL_MACHINE_SG_ID] 23 | key_name = var.KEY_NAME 24 | subnet_id = var.PRI_SUB_7_A_ID 25 | user_data = filebase64("../module/EC2/config.sh") 26 | iam_instance_profile = aws_iam_instance_profile.control_machine_profile.name 27 | tags = { 28 | Name = "control-machine" 29 | } 30 | } 31 | 32 | -------------------------------------------------------------------------------- /module/EC2/output.tf: -------------------------------------------------------------------------------- 1 | output "MONGOS_1_ID" { 2 | value = aws_instance.servers["mongos-1"].id 3 | } 4 | output "MONGOS_2_ID" { 5 | value = aws_instance.servers["mongos-2"].id 6 | } 7 | -------------------------------------------------------------------------------- /module/EC2/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | variable "VPC_ID" {} 3 | variable "CPU" { 4 | default = "t2.micro" 5 | } 6 | variable "AMI" { 7 | default = "ami-053b0d53c279acc90" 8 | } 9 | variable "ROLE_NAME" {} 10 | variable "KEY_NAME" {} 11 | # variable "PUB_SUB_1_A_ID" {} 12 | # variable "PUB_SUB_2_B_ID" {} 13 | # variable "PUB_SUB_3_C_ID" {} 14 | # variable "PRI_SUB_4_A_ID" {} 15 | # 
variable "PRI_SUB_5_B_ID" {} 16 | # variable "PRI_SUB_6_C_ID" {} 17 | variable "PRI_SUB_7_A_ID" {} 18 | variable "PRI_SUB_8_B_ID" {} 19 | variable "PRI_SUB_9_C_ID" {} 20 | variable "MONGODB_CLUSTER_SG_ID" {} 21 | variable "CONTROL_MACHINE_SG_ID" {} 22 | 23 | locals { 24 | data = { 25 | shard-1 = { 26 | sub_id = var.PRI_SUB_7_A_ID 27 | name = "shard-1" 28 | } 29 | shard-2 = { 30 | sub_id = var.PRI_SUB_8_B_ID 31 | name = "shard-2" 32 | } 33 | shard-3 = { 34 | sub_id = var.PRI_SUB_9_C_ID 35 | name = "shard-3" 36 | } 37 | csrs-1 = { 38 | sub_id = var.PRI_SUB_7_A_ID 39 | name = "csrs-1" 40 | } 41 | csrs-2 = { 42 | sub_id = var.PRI_SUB_8_B_ID 43 | name = "csrs-2" 44 | } 45 | csrs-3 = { 46 | sub_id = var.PRI_SUB_9_C_ID 47 | name = "csrs-3" 48 | } 49 | mongos-1 = { 50 | sub_id = var.PRI_SUB_7_A_ID 51 | name = "mongos-1" 52 | } 53 | mongos-2 = { 54 | sub_id = var.PRI_SUB_9_C_ID 55 | name = "mongos-2" 56 | } 57 | } 58 | } 59 | 60 | -------------------------------------------------------------------------------- /module/IAM/main.tf: -------------------------------------------------------------------------------- 1 | 2 | # Create an IAM role 3 | resource "aws_iam_role" "control_machine" { 4 | name = "S3-and-secManger-access" 5 | 6 | assume_role_policy = jsonencode({ 7 | Version = "2012-10-17" 8 | Statement = [ 9 | { 10 | Effect = "Allow" 11 | Principal = { 12 | Service = "ec2.amazonaws.com" 13 | } 14 | Action = "sts:AssumeRole" 15 | } 16 | ] 17 | }) 18 | } 19 | 20 | 21 | 22 | resource "aws_iam_role_policy_attachment" "s3_access" { 23 | role = aws_iam_role.control_machine.name 24 | policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforAWSCodeDeploy" 25 | } 26 | 27 | resource "aws_iam_role_policy_attachment" "secret_manager_access" { 28 | role = aws_iam_role.control_machine.name 29 | policy_arn = "arn:aws:iam::aws:policy/SecretsManagerReadWrite" 30 | } 31 | resource "aws_iam_role_policy_attachment" "CloudWatchAgent" { 32 | role = aws_iam_role.control_machine.name 33 
| policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy" 34 | } 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /module/IAM/output.tf: -------------------------------------------------------------------------------- 1 | output "ROLE_NAME"{ 2 | value = aws_iam_role.control_machine.name 3 | } -------------------------------------------------------------------------------- /module/IAM/variables.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AnkitJodhani/Terraform-Production-ready-mongodb-project6/423a26914c3bbf6f710572aa3d79c3ef04ddcf4f/module/IAM/variables.tf -------------------------------------------------------------------------------- /module/KEY/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "key" { 2 | key_name = "controller" 3 | public_key = file("../module/KEY/controller.pub") 4 | } 5 | 6 | -------------------------------------------------------------------------------- /module/KEY/output.tf: -------------------------------------------------------------------------------- 1 | output "KEY_NAME" { 2 | value = aws_key_pair.key.key_name 3 | } 4 | 5 | -------------------------------------------------------------------------------- /module/KEY/variables.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AnkitJodhani/Terraform-Production-ready-mongodb-project6/423a26914c3bbf6f710572aa3d79c3ef04ddcf4f/module/KEY/variables.tf -------------------------------------------------------------------------------- /module/LB/main.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------- ALB------------------------------------------ 2 | # create application load balancer 3 | resource "aws_lb" "application_load_balancer" 
{ 4 | name = "${var.PROJECT_NAME}-alb" 5 | internal = false 6 | load_balancer_type = "application" 7 | security_groups = [var.ALB_SG_ID] 8 | subnets = [var.PUB_SUB_1_A_ID,var.PUB_SUB_2_B_ID,var.PUB_SUB_3_C_ID] 9 | enable_deletion_protection = false 10 | 11 | tags = { 12 | Name = "${var.PROJECT_NAME}-alb" 13 | } 14 | } 15 | 16 | # create target group 17 | resource "aws_lb_target_group" "alb_target_group" { 18 | name = "${var.PROJECT_NAME}-alb-tg" 19 | target_type = "instance" 20 | port = 80 21 | protocol = "HTTP" 22 | vpc_id = var.VPC_ID 23 | 24 | health_check { 25 | enabled = true 26 | interval = 300 27 | path = "/" 28 | timeout = 60 29 | matcher = 200 30 | healthy_threshold = 5 31 | unhealthy_threshold = 5 32 | } 33 | 34 | lifecycle { 35 | create_before_destroy = true 36 | } 37 | } 38 | 39 | # create a listener on port 80 with redirect action 40 | resource "aws_lb_listener" "alb_http_listener" { 41 | load_balancer_arn = aws_lb.application_load_balancer.arn 42 | port = 80 43 | protocol = "HTTP" 44 | 45 | default_action { 46 | type = "forward" 47 | target_group_arn = aws_lb_target_group.alb_target_group.arn 48 | } 49 | } 50 | 51 | #------------------------------------------------------- NLB ------------------------------------------ 52 | # create network load balancer 53 | resource "aws_lb" "network_load_balancer" { 54 | name = "${var.PROJECT_NAME}-nlb" 55 | internal = false 56 | load_balancer_type = "network" 57 | security_groups = [var.NLB_SG_ID] 58 | subnets = [var.PUB_SUB_1_A_ID,var.PUB_SUB_2_B_ID,var.PUB_SUB_3_C_ID] 59 | enable_deletion_protection = false 60 | 61 | tags = { 62 | Name = "${var.PROJECT_NAME}-nlb" 63 | Env = "Prod" 64 | } 65 | } 66 | 67 | # create target group 68 | resource "aws_lb_target_group" "nlb_target_group" { 69 | name = "${var.PROJECT_NAME}-nlb-tg" 70 | target_type = "instance" 71 | port = 26000 72 | protocol = "TCP" 73 | vpc_id = var.VPC_ID 74 | 75 | health_check { 76 | enabled = true 77 | interval = 300 78 | path = "/" 79 | timeout = 60 
80 | matcher = 200 81 | healthy_threshold = 5 82 | unhealthy_threshold = 5 83 | } 84 | 85 | lifecycle { 86 | create_before_destroy = true 87 | } 88 | } 89 | 90 | # create a listener on port 26000 with redirect action 91 | resource "aws_lb_listener" "nlb_tcp_listener" { 92 | load_balancer_arn = aws_lb.network_load_balancer.arn 93 | port = 26000 94 | protocol = "TCP" 95 | 96 | default_action { 97 | type = "forward" 98 | target_group_arn = aws_lb_target_group.nlb_target_group.arn 99 | } 100 | } 101 | 102 | resource "aws_lb_target_group_attachment" "mongos_1_attachment" { 103 | target_group_arn = aws_lb_target_group.nlb_target_group.arn 104 | target_id = var.MONGOS_1_ID 105 | port = 26000 106 | } 107 | resource "aws_lb_target_group_attachment" "mongos_2_attachment" { 108 | target_group_arn = aws_lb_target_group.nlb_target_group.arn 109 | target_id = var.MONGOS_2_ID 110 | port = 26000 111 | } 112 | 113 | 114 | -------------------------------------------------------------------------------- /module/LB/output.tf: -------------------------------------------------------------------------------- 1 | 2 | output "NGINX_TG_ARN"{ 3 | value = aws_lb_target_group.alb_target_group.arn 4 | } -------------------------------------------------------------------------------- /module/LB/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | variable "PROJECT_NAME"{} 4 | variable "ALB_SG_ID"{} 5 | variable "NLB_SG_ID"{} 6 | variable "VPC_ID" {} 7 | variable "PUB_SUB_1_A_ID"{} 8 | variable "PUB_SUB_2_B_ID"{} 9 | variable "PUB_SUB_3_C_ID"{} 10 | variable "MONGOS_1_ID" {} 11 | variable "MONGOS_2_ID" {} -------------------------------------------------------------------------------- /module/SG/main.tf: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------------------- 
Control-machine-sg------------------------------------------------------------------- 2 | 3 | resource "aws_security_group" "control-machine" { 4 | name = "control-machine" 5 | description = "control-machine security group" 6 | vpc_id = var.VPC_ID 7 | 8 | # ingress { 9 | # description = "http access" 10 | # from_port = 22 11 | # to_port = 22 12 | # protocol = "tcp" 13 | # cidr_blocks = ["0.0.0.0/0"] 14 | # } 15 | 16 | egress { 17 | from_port = 0 18 | to_port = 0 19 | protocol = -1 20 | cidr_blocks = ["0.0.0.0/0"] 21 | } 22 | 23 | tags = { 24 | Name = "control-machine-sg" 25 | } 26 | } 27 | 28 | #--------------------------------------------------------------------------------- ALB-sg------------------------------------------------------------------- 29 | 30 | # create security group for the application load balancer 31 | resource "aws_security_group" "alb_sg" { 32 | name = "alb-sg" 33 | description = "enable http/https access on port 80/443" 34 | vpc_id = var.VPC_ID 35 | 36 | ingress { 37 | description = "http access" 38 | from_port = 80 39 | to_port = 80 40 | protocol = "tcp" 41 | cidr_blocks = ["0.0.0.0/0"] 42 | } 43 | 44 | ingress { 45 | description = "https access" 46 | from_port = 443 47 | to_port = 443 48 | protocol = "tcp" 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | 52 | egress { 53 | from_port = 0 54 | to_port = 0 55 | protocol = -1 56 | cidr_blocks = ["0.0.0.0/0"] 57 | } 58 | 59 | tags = { 60 | Name = "ALB-SG" 61 | } 62 | } 63 | 64 | #--------------------------------------------------------------------------------- Nginx-EC2-sg------------------------------------------------------------------- 65 | 66 | # create security group for the application load balancer 67 | resource "aws_security_group" "nginx_sg" { 68 | name = "nginx-ec2-sg" 69 | description = "enable http from ALB-sg on port 80" 70 | vpc_id = var.VPC_ID 71 | 72 | ingress { 73 | description = "http access" 74 | from_port = 80 75 | to_port = 80 76 | protocol = "tcp" 77 | security_groups = 
[aws_security_group.alb_sg.id] 78 | } 79 | 80 | ingress { 81 | description = "allow control-machine" 82 | from_port = 22 83 | to_port = 22 84 | protocol = "tcp" 85 | security_groups = [aws_security_group.control-machine.id] 86 | } 87 | 88 | 89 | egress { 90 | from_port = 0 91 | to_port = 0 92 | protocol = -1 93 | cidr_blocks = ["0.0.0.0/0"] 94 | } 95 | 96 | tags = { 97 | Name = "Nginx-EC2-sg" 98 | } 99 | depends_on = [aws_security_group.alb_sg] 100 | } 101 | 102 | #--------------------------------------------------------------------------------- NLB-sg------------------------------------------------------------------- 103 | 104 | # create security group for the Network load balancer 105 | resource "aws_security_group" "nlb_sg" { 106 | name = "nlb-sg" 107 | description = "enable http/https access on port 26000" 108 | vpc_id = var.VPC_ID 109 | 110 | ingress { 111 | description = "https access" 112 | from_port = 26000 113 | to_port = 26000 114 | protocol = "tcp" 115 | cidr_blocks = ["0.0.0.0/0"] 116 | } 117 | 118 | egress { 119 | from_port = 0 120 | to_port = 0 121 | protocol = -1 122 | cidr_blocks = ["0.0.0.0/0"] 123 | } 124 | 125 | tags = { 126 | Name = "NLB-SG" 127 | } 128 | } 129 | 130 | 131 | #--------------------------------------------------------------------------------- Mongodb-cluster-ip ------------------------------------------------------------------- 132 | 133 | # create security group for the mongodb cluster 134 | resource "aws_security_group" "mongodb_cluster_sg" { 135 | name = "mongodb-cluster-sg" 136 | description = "enable tcp from nlb-sg on port 26000" 137 | vpc_id = var.VPC_ID 138 | 139 | ingress { 140 | description = " access" 141 | from_port = 26000 142 | to_port = 26000 143 | protocol = "tcp" 144 | security_groups = [aws_security_group.nlb_sg.id] 145 | } 146 | 147 | ingress { 148 | description = "allow control-machine" 149 | from_port = 22 150 | to_port = 22 151 | protocol = "tcp" 152 | security_groups = [aws_security_group.control-machine.id] 
153 | } 154 | 155 | egress { 156 | from_port = 0 157 | to_port = 0 158 | protocol = -1 159 | cidr_blocks = ["0.0.0.0/0"] 160 | } 161 | 162 | tags = { 163 | Name = "Mongodb-cluster" 164 | } 165 | depends_on = [aws_security_group.nlb_sg] 166 | } 167 | 168 | 169 | resource "aws_security_group_rule" "internal_traffic" { 170 | type = "ingress" 171 | from_port = 0 172 | to_port = 65535 173 | protocol = "tcp" 174 | security_group_id = aws_security_group.mongodb_cluster_sg.id 175 | source_security_group_id = aws_security_group.mongodb_cluster_sg.id 176 | 177 | } 178 | 179 | -------------------------------------------------------------------------------- /module/SG/output.tf: -------------------------------------------------------------------------------- 1 | output "ALB_SG_ID" { 2 | value = aws_security_group.alb_sg.id 3 | } 4 | output "NGINX_SG_ID" { 5 | value = aws_security_group.nginx_sg.id 6 | } 7 | output "NLB_SG_ID" { 8 | value = aws_security_group.nlb_sg.id 9 | } 10 | output "MONGODB_CLUSTER_SG_ID" { 11 | value = aws_security_group.mongodb_cluster_sg.id 12 | } 13 | 14 | output "CONTROL_MACHINE_SG_ID" { 15 | value = aws_security_group.control-machine.id 16 | } -------------------------------------------------------------------------------- /module/SG/variables.tf: -------------------------------------------------------------------------------- 1 | variable "VPC_ID"{ 2 | 3 | } -------------------------------------------------------------------------------- /module/VPC/main.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------ VPC ----------------------------------------------------------------------- 2 | 3 | # create vpc 4 | resource "aws_vpc" "vpc" { 5 | cidr_block = var.VPC_CIDR 6 | instance_tenancy = "default" 7 | enable_dns_hostnames = true 8 | enable_dns_support = true 9 | 10 | tags = { 11 | Name = "${var.PROJECT_NAME}-vpc" 12 | } 13 | } 14 | 15 | 16 | # use data source to 
get all avalablility zones in region 17 | data "aws_availability_zones" "available_zones" {} 18 | 19 | #------------------------------------------------------ Public subnet---------------------------------------------------------------- 20 | 21 | # create public subnet pub-sub-1-a 22 | resource "aws_subnet" "pub-sub-1-a" { 23 | vpc_id = aws_vpc.vpc.id 24 | cidr_block = var.PUB_SUB_1_A_CIDR 25 | availability_zone = data.aws_availability_zones.available_zones.names[0] 26 | map_public_ip_on_launch = true 27 | 28 | tags = { 29 | Name = "pub-sub-1-a" 30 | } 31 | } 32 | 33 | # create public subnet pub-sub-2-b 34 | resource "aws_subnet" "pub-sub-2-b" { 35 | vpc_id = aws_vpc.vpc.id 36 | cidr_block = var.PUB_SUB_2_B_CIDR 37 | availability_zone = data.aws_availability_zones.available_zones.names[1] 38 | map_public_ip_on_launch = true 39 | 40 | tags = { 41 | Name = "pub-sub-2-b" 42 | } 43 | } 44 | # create public subnet pub-sub-3-c 45 | resource "aws_subnet" "pub-sub-3-c" { 46 | vpc_id = aws_vpc.vpc.id 47 | cidr_block = var.PUB_SUB_3_C_CIDR 48 | availability_zone = data.aws_availability_zones.available_zones.names[2] 49 | map_public_ip_on_launch = true 50 | 51 | tags = { 52 | Name = "pub-sub-3-c" 53 | } 54 | } 55 | 56 | #------------------------------------------------------ Private subnet---------------------------------------------------------------- 57 | 58 | # create private app pri-sub-4-a 59 | resource "aws_subnet" "pri-sub-4-a" { 60 | vpc_id = aws_vpc.vpc.id 61 | cidr_block = var.PRI_SUB_4_A_CIDR 62 | availability_zone = data.aws_availability_zones.available_zones.names[0] 63 | map_public_ip_on_launch = false 64 | 65 | tags = { 66 | Name = "pri-sub-4-a" 67 | } 68 | } 69 | # create private app pri-sub-5-b 70 | resource "aws_subnet" "pri-sub-5-b" { 71 | vpc_id = aws_vpc.vpc.id 72 | cidr_block = var.PRI_SUB_5_B_CIDR 73 | availability_zone = data.aws_availability_zones.available_zones.names[1] 74 | map_public_ip_on_launch = false 75 | 76 | tags = { 77 | Name = 
"pri-sub-5-b" 78 | } 79 | } 80 | # create private app pri-sub-6-c 81 | resource "aws_subnet" "pri-sub-6-c" { 82 | vpc_id = aws_vpc.vpc.id 83 | cidr_block = var.PRI_SUB_6_C_CIDR 84 | availability_zone = data.aws_availability_zones.available_zones.names[2] 85 | map_public_ip_on_launch = false 86 | 87 | tags = { 88 | Name = "pri-sub-6-c" 89 | } 90 | } 91 | # create private app pri-sub-7-a 92 | resource "aws_subnet" "pri-sub-7-a" { 93 | vpc_id = aws_vpc.vpc.id 94 | cidr_block = var.PRI_SUB_7_A_CIDR 95 | availability_zone = data.aws_availability_zones.available_zones.names[0] 96 | map_public_ip_on_launch = false 97 | 98 | tags = { 99 | Name = "pri-sub-7-a" 100 | } 101 | } 102 | # create private app pri-sub-8-b 103 | resource "aws_subnet" "pri-sub-8-b" { 104 | vpc_id = aws_vpc.vpc.id 105 | cidr_block = var.PRI_SUB_8_B_CIDR 106 | availability_zone = data.aws_availability_zones.available_zones.names[1] 107 | map_public_ip_on_launch = false 108 | 109 | tags = { 110 | Name = "pri-sub-8-b" 111 | } 112 | } 113 | # create private app pri-sub-9-c 114 | resource "aws_subnet" "pri-sub-9-c" { 115 | vpc_id = aws_vpc.vpc.id 116 | cidr_block = var.PRI_SUB_9_C_CIDR 117 | availability_zone = data.aws_availability_zones.available_zones.names[2] 118 | map_public_ip_on_launch = false 119 | 120 | tags = { 121 | Name = "pri-sub-9-c" 122 | } 123 | } 124 | 125 | #------------------------------------------------------ IGW ----------------------------------------------------------------------- 126 | 127 | # create internet gateway and attach it to vpc 128 | resource "aws_internet_gateway" "internet_gateway" { 129 | vpc_id = aws_vpc.vpc.id 130 | 131 | tags = { 132 | Name = "${var.PROJECT_NAME}-igw" 133 | } 134 | } 135 | 136 | 137 | #------------------------------------------------------ Public-RT & subnet association ----------------------------------------------------------------------- 138 | 139 | # create route table and add public route 140 | resource "aws_route_table" "public_route_table" { 
  vpc_id = aws_vpc.vpc.id

  # default route: send all internet-bound traffic through the internet gateway
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.internet_gateway.id
  }

  tags = {
    Name = "Public-RT"
  }
}


# associate public subnet pub-sub-1-a to "public route table"
resource "aws_route_table_association" "pub-sub-1-a_route_table_association" {
  subnet_id      = aws_subnet.pub-sub-1-a.id
  route_table_id = aws_route_table.public_route_table.id
}

# associate public subnet pub-sub-2-b to "public route table"
resource "aws_route_table_association" "pub-sub-2-b_route_table_association" {
  subnet_id      = aws_subnet.pub-sub-2-b.id
  route_table_id = aws_route_table.public_route_table.id
}
# associate public subnet pub-sub-3-c to "public route table"
resource "aws_route_table_association" "pub-sub-3-c_route_table_association" {
  subnet_id      = aws_subnet.pub-sub-3-c.id
  route_table_id = aws_route_table.public_route_table.id
}


#------------------------------------------------------ 3 Elastic IP for 3 NAT-GW -----------------------------------------------------------------------

# allocate elastic ip. this eip will be used for the nat-gateway in the public subnet pub-sub-1-a
# NOTE(review): the `vpc` argument is deprecated in AWS provider v5 (replaced by `domain = "vpc"`)
# — confirm the pinned provider version before migrating
resource "aws_eip" "EIP-NAT-GW-A" {
  vpc = true

  tags = {
    Name = "NAT-GW-EIP-A"
  }
}
# allocate elastic ip. this eip will be used for the nat-gateway in the public subnet pub-sub-2-b
resource "aws_eip" "EIP-NAT-GW-B" {
  vpc = true

  tags = {
    Name = "NAT-GW-EIP-B"
  }
}
# allocate elastic ip.
this eip will be used for the nat-gateway in the public subnet pub-sub-3-c
# NOTE(review): the `vpc` argument is deprecated in AWS provider v5 (replaced by `domain = "vpc"`)
# — confirm the pinned provider version before migrating
resource "aws_eip" "EIP-NAT-GW-C" {
  vpc = true

  tags = {
    Name = "NAT-GW-EIP-C"
  }
}

#------------------------------------------------------ NAT-GW -----------------------------------------------------------------------

# create nat gateway in public subnet pub-sub-1-a
resource "aws_nat_gateway" "NAT-GW-A" {
  allocation_id = aws_eip.EIP-NAT-GW-A.id
  subnet_id     = aws_subnet.pub-sub-1-a.id

  tags = {
    Name = "NAT-GW-A"
  }

  # per the AWS provider docs, a NAT gateway should explicitly depend on the internet
  # gateway (it cannot route traffic until the IGW exists); the VPC dependency is
  # already implicit through the subnet reference
  depends_on = [aws_internet_gateway.internet_gateway]
}
# create nat gateway in public subnet pub-sub-2-b
resource "aws_nat_gateway" "NAT-GW-B" {
  allocation_id = aws_eip.EIP-NAT-GW-B.id
  subnet_id     = aws_subnet.pub-sub-2-b.id

  tags = {
    Name = "NAT-GW-B"
  }

  # explicit ordering on the internet gateway (see NAT-GW-A)
  depends_on = [aws_internet_gateway.internet_gateway]
}
# create nat gateway in public subnet pub-sub-3-c
resource "aws_nat_gateway" "NAT-GW-C" {
  allocation_id = aws_eip.EIP-NAT-GW-C.id
  subnet_id     = aws_subnet.pub-sub-3-c.id

  tags = {
    Name = "NAT-GW-C"
  }

  # explicit ordering on the internet gateway (see NAT-GW-A)
  depends_on = [aws_internet_gateway.internet_gateway]
}

#------------------------------------------------------ Private RTs -----------------------------------------------------------------------

# create private route table Pri-RT-A and add default route through NAT-GW-A
resource "aws_route_table" "Pri-RT-A" {
  vpc_id = aws_vpc.vpc.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.NAT-GW-A.id
  }

  tags = {
    Name = "Pri-RT-A"
  }
}
# create private route table
Pri-RT-B and add route through NAT-GW-B
resource "aws_route_table" "Pri-RT-B" {
  vpc_id = aws_vpc.vpc.id

  # default route: internet-bound traffic from AZ-b private subnets goes via NAT-GW-B
  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.NAT-GW-B.id
  }

  tags = {
    Name = "Pri-RT-B"
  }
}
# create private route table Pri-RT-C and add route through NAT-GW-C
resource "aws_route_table" "Pri-RT-C" {
  vpc_id = aws_vpc.vpc.id

  # default route: internet-bound traffic from AZ-c private subnets goes via NAT-GW-C
  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.NAT-GW-C.id
  }

  tags = {
    Name = "Pri-RT-C"
  }
}


#------------------------------------------------------ Private subnet association -----------------------------------------------------------------------

# associate private subnet pri-sub-4-a with private route table Pri-RT-A
resource "aws_route_table_association" "pri-sub-4-a-with-Pri-RT-A" {
  subnet_id      = aws_subnet.pri-sub-4-a.id
  route_table_id = aws_route_table.Pri-RT-A.id
}
# associate private subnet pri-sub-7-a with private route table Pri-RT-A
resource "aws_route_table_association" "pri-sub-7-a-with-Pri-RT-A" {
  subnet_id      = aws_subnet.pri-sub-7-a.id
  route_table_id = aws_route_table.Pri-RT-A.id
}

# associate private subnet pri-sub-5-b with private route table Pri-RT-B
resource "aws_route_table_association" "pri-sub-5-b-with-Pri-RT-B" {
  subnet_id      = aws_subnet.pri-sub-5-b.id
  route_table_id = aws_route_table.Pri-RT-B.id
}
# associate private subnet pri-sub-8-b with private route table Pri-RT-B
resource "aws_route_table_association" "pri-sub-8-b-with-Pri-RT-B" {
  subnet_id      = aws_subnet.pri-sub-8-b.id
  route_table_id = aws_route_table.Pri-RT-B.id
}

# associate private subnet pri-sub-6-c with private route table Pri-RT-C
resource "aws_route_table_association" "pri-sub-6-c-with-Pri-RT-C" {
  subnet_id =
aws_subnet.pri-sub-6-c.id
  route_table_id = aws_route_table.Pri-RT-C.id
}
# associate private subnet pri-sub-9-c with private route table Pri-RT-C
resource "aws_route_table_association" "pri-sub-9-c-with-Pri-RT-C" {
  subnet_id      = aws_subnet.pri-sub-9-c.id
  route_table_id = aws_route_table.Pri-RT-C.id
}

-------------------------------------------------------------------------------- /module/VPC/output.tf: --------------------------------------------------------------------------------

# pass-through settings and created resource IDs, consumed by sibling modules via the root module

output "REGION" {
  value = var.REGION
}

output "PROJECT_NAME" {
  value = var.PROJECT_NAME
}
output "VPC_ID" {
  value = aws_vpc.vpc.id
}

# public subnet IDs
output "PUB_SUB_1_A_ID" {
  value = aws_subnet.pub-sub-1-a.id
}
output "PUB_SUB_2_B_ID" {
  value = aws_subnet.pub-sub-2-b.id

}
output "PUB_SUB_3_C_ID" {
  value = aws_subnet.pub-sub-3-c.id

}

# private subnet IDs
output "PRI_SUB_4_A_ID" {
  value = aws_subnet.pri-sub-4-a.id

}
output "PRI_SUB_5_B_ID" {
  value = aws_subnet.pri-sub-5-b.id

}
output "PRI_SUB_6_C_ID" {
  value = aws_subnet.pri-sub-6-c.id

}
output "PRI_SUB_7_A_ID" {
  value = aws_subnet.pri-sub-7-a.id

}
output "PRI_SUB_8_B_ID" {
  value = aws_subnet.pri-sub-8-b.id

}
output "PRI_SUB_9_C_ID" {
  value = aws_subnet.pri-sub-9-c.id

}

-------------------------------------------------------------------------------- /module/VPC/variables.tf: --------------------------------------------------------------------------------

# inputs supplied by the root module (see main/terraform.tfvars)

variable "REGION" {}

variable "PROJECT_NAME" {}

variable "VPC_CIDR" {}

# public subnet CIDR blocks, one per AZ
variable "PUB_SUB_1_A_CIDR" {}

variable "PUB_SUB_2_B_CIDR" {}
variable "PUB_SUB_3_C_CIDR" {}

# private subnet CIDR blocks, two per AZ
variable "PRI_SUB_4_A_CIDR" {}
variable "PRI_SUB_5_B_CIDR" {}
variable "PRI_SUB_6_C_CIDR" {}
| variable "PRI_SUB_7_A_CIDR" {} 17 | variable "PRI_SUB_8_B_CIDR" {} 18 | variable "PRI_SUB_9_C_CIDR" {} 19 | 20 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # 🌍 Terraform script to provisiond MongoDB sharded cluster on AWS 2 | 3 | Welcome to the MongoDB sharded cluster provisioning repository! Here, you'll find all the necessary resources and instructions to set up a highly available and scalable MongoDB sharded cluster on the AWS cloud. 4 | 5 | ## 🏠 Architecture 6 | ![Architecture of the application](architecture.gif) 7 | 8 | ## 🚀 Why Use a Sharded Cluster? 9 | A sharded cluster is designed to handle large datasets and high workloads by distributing data across multiple machines. This architecture offers horizontal scalability, making it an ideal choice for applications that require seamless scaling as user demand grows. With automatic data distribution and failover mechanisms, a sharded cluster ensures both data availability and reliability. 10 | 11 | 12 | ## 🖥️ Installation of Terraform 13 | **Note** To get started, make sure you have Terraform installed on your machine. The provided installation script helps you set up Terraform quickly on an Amazon Linux environment. 14 | 15 | ```sh 16 | #!/bin/bash 17 | 18 | # fail on any error 19 | set -eu 20 | 21 | # install yum-config-manager to manage your repositories 22 | sudo yum install -y yum-utils 23 | 24 | # use yum-config-manager to add the official HashiCorp Linux repository 25 | sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo 26 | 27 | # install terraform 28 | sudo yum -y install terraform 29 | 30 | # verify terraform is installed 31 | terraform --version 32 | ``` 33 | 34 | ## 📦 Cloning the Repository 35 | Start by cloning this repository to your local machine. 
Navigate into the project directory to access all the necessary files for setting up the MongoDB sharded cluster.

```sh
git clone https://github.com/AnkitJodhani/Terraform-Production-ready-mongodb-project6.git
cd Terraform-Production-ready-mongodb-project6
```

## 🔑 Generating SSH Keys
Secure communication between cluster components is essential. Follow the steps provided to generate SSH keys and store them in the designated directory `/module/KEY/`. These keys will be used for authentication purposes within the cluster.
**Note** We need to generate a public key and a private key and store them in the `/module/KEY/` directory.

```sh
cd module/KEY/
ssh-keygen.exe
```

**Note:** Please rename the files to *controller.pub* & *controller*, because those names are predefined in the Terraform script.

## Ansible (Optional)
If you prefer an automated approach to configuring your MongoDB sharded cluster, consider exploring the Ansible integration. The provided [Ansible script](https://github.com/AnkitJodhani/Ansible-Production-ready-mongodb-project6.git) repository offers automation for various aspects of cluster management.

## Blog link
For a comprehensive guide on setting up a production-ready MongoDB sharded cluster using both Terraform and Ansible, check out the accompanying [blog post](https://github.com/AnkitJodhani/Ansible-Production-ready-mongodb-project6.git). The blog covers step-by-step instructions, best practices, and insights into optimizing your cluster for performance and reliability.

## Configuration

Customize the `terraform.tfvars` file to tailor the cluster setup to your specific requirements. Set variables such as the region, project name, instance sizes, and network configurations.

```sh
vim main/terraform.tfvars
```

The file contains many important variables that Terraform needs to spin up the desired infrastructure.
```sh
REGION = ""
PROJECT_NAME = ""
CPU = ""
AMI = ""
MAX_SIZE = ""
MIN_SIZE = ""
DESIRED_CAP = ""
VPC_CIDR = ""
PUB_SUB_1_A_CIDR = ""
PUB_SUB_2_B_CIDR = ""
PUB_SUB_3_C_CIDR = ""
PRI_SUB_4_A_CIDR = ""
PRI_SUB_5_B_CIDR = ""
PRI_SUB_6_C_CIDR = ""
PRI_SUB_7_A_CIDR = ""
PRI_SUB_8_B_CIDR = ""
PRI_SUB_9_C_CIDR = ""

```

## ✈️ Now we are ready to deploy our application on the cloud ⛅
Get into the project directory:
```sh
cd Terraform-Production-ready-mongodb-project6/main/
```

Type the command below to see the execution plan:
```sh
terraform plan
```

✨ Finally, run the command below to deploy the application...
```sh
terraform apply
```

Type `yes` when it prompts you for confirmation.

## 🎉 Congratulations!

Your MongoDB sharded cluster is now up and running on the AWS cloud! You've successfully set up a powerful and scalable database system that can handle your application's growing needs. If you have any questions or encounter any issues, refer to the blog post or reach out to the community for assistance.

**Thank you for choosing this repository for your MongoDB cluster deployment. Happy coding! 🚀** --------------------------------------------------------------------------------