├── .gitignore ├── LICENSE ├── README.md ├── _docs └── RabbitMQClusterAWS.png ├── cloudwatch.tf ├── example ├── provider.tf ├── rabbit.auto.tfvars ├── settings.auto.tfvars └── variables.tf ├── iam.tf ├── output.tf ├── policies ├── ProxyPolicies.json └── ProxyRole.json ├── rabbit-node.tf ├── rabbitmq-alb ├── dns.tf ├── internal.tf └── variables.tf ├── security.tf ├── user_data └── rabbitmq.sh └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | *.backup 2 | .terraform 3 | *.tfstate -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 CitizenPlane 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![licence](https://img.shields.io/github/license/citizenplane/terraform-aws-rabbitmq.svg) 2 | 3 | # Rabbitmq AWS Module 4 | This repository is a set of two modules: 5 | - One to create an Auto Scaling Group that will bind rabbitmq nodes together using the rabbitmq plugins: 6 | [rabbitmq_peer_discovery_aws](https://www.rabbitmq.com/cluster-formation.html#peer-discovery-aws) 7 | 8 | - The other to declare two new entries on a private route53 zone, and bind them to a load balencer for the web interface management plugin 9 | and the default rabbitmq TCP port in order to open new connections and channels. 10 | 11 | ![cloudcraft_schema](https://raw.githubusercontent.com/CitizenPlane/terraform-aws-rabbitmq/master/_docs/RabbitMQClusterAWS.png) 12 | 13 | ## How to use this Module 14 | 15 | This module purpose is only to create a Rabbitmq Cluster and the routes to access it. 16 | It does not include the creation of a *VPC* nor the *route53* zone used to access the Load balancer. 17 | 18 | I'll let you refer to our other modules if you want to use them, otherwise it should be easy enough to plug this module in an already exisiting VPC (the alb beeing optional too). 19 | 20 | Apart from the network, there is not much configuration to do as you can see in the example folder. Here are the main settings: 21 | 22 | ```hcl 23 | module "rabbit" { 24 | source = "path/to/module" 25 | 26 | name = "An useful name to identify your clustser" 27 | environment = "Specify the environment (Prod/Staging/Test/whatever...)" 28 | 29 | # To bind the manager together, Rabbitmq uses the Erlang cookie so it knows they can join the cluster 30 | erl_secret_cookie = "a random secret key" 31 | # As we use the rabbit_peer_discovery_aws we need credentials that can inspect ec2 or asg groups 32 | 33 | # https://www.rabbitmq.com/cluster-formation.html#peer-discovery-aws 34 | aws_access_key = "KEY" 35 | 36 | aws_secret_key = "SECRET" 37 | 38 | # See example for full usage of this var, here it's pass so we can name the cluster rabbimtq 39 | # https://github.com/CitizenPlane/terraform-aws-rabbitmq/blob/dc123d34742202811455d1bea50cb5f779186d2f/user_data/rabbitmq.sh#L122 40 | cluster_fqdn = "test" 41 | 42 | region = "eu-west-3" 43 | ssh_key_name = "ft_ssh_key" 44 | desired_capacity = 3 45 | autoscaling_min_size = 3 46 | autoscaling_max_size = 5 47 | instance_ebs_optimized = false 48 | 49 | vpc_id = "vpc_id" 50 | 51 | # Subnets Zone where the ASG will create your EC2 instances 52 | external_subnets = "" 53 | 54 | root_volume_size = 20 # / 55 | rabbit_volume_size = 50 # /var/lib/rabbitmq 56 | 57 | associate_public_ip_address = true 58 | 59 | # Note : AMI are region related. Make sure the AMI you choose is available in your region 60 | # https://cloud-images.ubuntu.com/locator/ec2/ 61 | image_id = "" 62 | 63 | # You define the CIDR block that can reach your private ip in your VPC 64 | # Don't forget to include your EC2 instances 65 | # Any Network Interface that may need to access this cluster ECR ELB ALB ..... 66 | ingress_private_cidr_blocks = [ 67 | "192.x.x.x/24", 68 | "10.x.x.x/22", 69 | "172.x.x.x/16", 70 | ] 71 | 72 | # A set of Public IPs that can access the cluster from oustide your VPC 73 | # For instance, these will be used to restrict the Rabbitmq management web interface access 74 | ingress_public_cidr_blocks = [ 75 | "88.x.x.x/32", 76 | "195.x.x.x/32", 77 | ] 78 | 79 | # This is egress only settings for traffic going outside your VPC. You may not want your cluster 80 | # to be able to reach any ip from oustide your network 81 | internet_public_cidr_blocks = [ 82 | "0.0.0.0/0", 83 | ] 84 | 85 | instance_type = "" 86 | 87 | az_count = 3 88 | 89 | cpu_high_limit = "70" 90 | cpu_low_limit = "20" 91 | memory_high_limit = "70" 92 | memory_low_limit = "20" 93 | } 94 | ``` 95 | 96 | 97 | ## CitizenPlane 98 | 99 | *Starship Troopers narrator voice*: 100 | Would you like to know more ? CitizenPlane is hiring take a look [here](https://www.notion.so/citizenplane/Current-offers-a29fe322e68c4fb4aa5cb6d628d49108) 101 | -------------------------------------------------------------------------------- /_docs/RabbitMQClusterAWS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CitizenPlane/terraform-aws-rabbitmq/c1963a19b416de41bce5823383ed32fc5f41b69b/_docs/RabbitMQClusterAWS.png -------------------------------------------------------------------------------- /cloudwatch.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_metric_alarm" "node_cpu_high" { 2 | alarm_name = "${var.name}-${var.environment}-node-cpureservation-high" 3 | comparison_operator = "GreaterThanOrEqualToThreshold" 4 | evaluation_periods = "2" 5 | metric_name = "CPUUtilization" 6 | namespace = "AWS/EC2" 7 | period = "300" 8 | statistic = "Maximum" 9 | threshold = "70" 10 | 11 | dimensions = { 12 | AutoScalingGroupName = aws_autoscaling_group.rabbit-node.name 13 | } 14 | 15 | alarm_description = "Scale up if the cpu reservation is above 70% for 10 minutes" 16 | alarm_actions = [aws_autoscaling_policy.rabbit-node-scale-up.arn] 17 | 18 | lifecycle { 19 | create_before_destroy = true 20 | } 21 | } 22 | 23 | resource "aws_cloudwatch_metric_alarm" "node_memory_high" { 24 | alarm_name = "${var.name}-${var.environment}-node-memoryreservation-high" 25 | comparison_operator = "GreaterThanOrEqualToThreshold" 26 | evaluation_periods = "2" 27 | metric_name = "MemoryReservation" 28 | namespace = "AWS/EC2" 29 | period = "300" 30 | statistic = "Maximum" 31 | threshold = "60" 32 | 33 | dimensions = { 34 | AutoScalingGroupName = aws_autoscaling_group.rabbit-node.name 35 | } 36 | 37 | alarm_description = "Scale up if the memory reservation is above 70% for 10 minutes" 38 | alarm_actions = [aws_autoscaling_policy.rabbit-node-scale-up.arn] 39 | 40 | lifecycle { 41 | create_before_destroy = true 42 | } 43 | 44 | # This is required to make cloudwatch alarms creation sequential, AWS doesn't 45 | # support modifying alarms concurrently. 46 | depends_on = ["aws_cloudwatch_metric_alarm.node_cpu_high"] 47 | } 48 | 49 | resource "aws_cloudwatch_metric_alarm" "node_cpu_low" { 50 | alarm_name = "${var.name}-${var.environment}-node-cpureservation-low" 51 | comparison_operator = "LessThanOrEqualToThreshold" 52 | evaluation_periods = "2" 53 | metric_name = "CPUUtilization" 54 | namespace = "AWS/EC2" 55 | period = "300" 56 | statistic = "Maximum" 57 | threshold = "20" 58 | 59 | dimensions = { 60 | AutoScalingGroupName = aws_autoscaling_group.rabbit-node.name 61 | } 62 | 63 | alarm_description = "Scale down if the cpu reservation is below 20% for 10 minutes" 64 | alarm_actions = [aws_autoscaling_policy.rabbit-node-scale-down.arn] 65 | 66 | lifecycle { 67 | create_before_destroy = true 68 | } 69 | 70 | # This is required to make cloudwatch alarms creation sequential, AWS doesn't 71 | # support modifying alarms concurrently. 72 | depends_on = ["aws_cloudwatch_metric_alarm.node_memory_high"] 73 | } 74 | 75 | resource "aws_cloudwatch_metric_alarm" "node_memory_low" { 76 | alarm_name = "${var.name}-${var.environment}-node-memoryreservation-low" 77 | comparison_operator = "LessThanOrEqualToThreshold" 78 | evaluation_periods = "2" 79 | metric_name = "MemoryReservation" 80 | namespace = "AWS/EC2" 81 | period = "300" 82 | statistic = "Maximum" 83 | threshold = "20" 84 | 85 | dimensions = { 86 | AutoScalingGroupName = aws_autoscaling_group.rabbit-node.name 87 | } 88 | 89 | alarm_description = "Scale down if the memory reservation is below 20% for 10 minutes" 90 | alarm_actions = [aws_autoscaling_policy.rabbit-node-scale-down.arn] 91 | 92 | lifecycle { 93 | create_before_destroy = true 94 | } 95 | 96 | # This is required to make cloudwatch alarms creation sequential, AWS doesn't 97 | # support modifying alarms concurrently. 98 | depends_on = ["aws_cloudwatch_metric_alarm.node_cpu_low"] 99 | } 100 | -------------------------------------------------------------------------------- /example/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "${var.region}" 3 | profile = "${var.profile}" 4 | } 5 | 6 | module "alb" { 7 | source = "../rabbitmq-alb" 8 | 9 | # # General settings 10 | environment = "${var.environment}" 11 | name = "${var.cluster_name}" 12 | 13 | internal = false 14 | domain_name = "${var.domain_name}" 15 | datacenter = "${var.region}" 16 | 17 | cluster_fqdn = "${var.cluster_fqdn}" 18 | 19 | # # Network 20 | subnet_ids = "${var.subnet_ids}" 21 | 22 | # # Autoscaling target group 23 | # # Note: only the servers need an ALB (only the servers expose an UI) 24 | autoscaling_group = "${module.rabbit.autoscaling_group}" 25 | 26 | allowed_inbound_cidr_blocks = "${var.ingress_public_cidr_blocks}" 27 | alb_security_group = "${module.rabbit.security_group_id}" 28 | 29 | # # External Settings 30 | certificate_arn = "${var.certificate_arn}" 31 | vpc_id = "${var.vpc_id}" 32 | } 33 | 34 | module "rabbit" { 35 | source = "../" 36 | 37 | name = "${var.cluster_name}" 38 | environment = "${var.environment}" 39 | 40 | erl_secret_cookie = "${var.erl_secret_cookie}" 41 | aws_access_key = "${var.aws_access_key}" 42 | aws_secret_key = "${var.aws_secret_key}" 43 | 44 | cluster_fqdn = "${var.cluster_fqdn}" 45 | 46 | region = "${var.region}" 47 | ssh_key_name = "${var.ssh_key_name}" 48 | desired_capacity = "${var.desired_capacity}" 49 | instance_ebs_optimized = "${var.instance_ebs_optimized}" 50 | 51 | vpc_id = "${var.vpc_id}" 52 | external_subnets = "${var.external_subnets}" 53 | 54 | root_volume_size = "${var.root_volume_size}" 55 | rabbit_volume_size = "${var.rabbit_volume_size}" 56 | 57 | associate_public_ip_address = "${var.associate_public_ip_address}" 58 | 59 | image_id = "${var.image_id}" 60 | 61 | ingress_private_cidr_blocks = "${var.ingress_private_cidr_blocks}" 62 | ingress_public_cidr_blocks = "${var.ingress_public_cidr_blocks}" 63 | internet_public_cidr_blocks = "${var.internet_public_cidr_blocks}" 64 | 65 | instance_type = "${var.instance_type}" 66 | 67 | az_count = "${var.az_count}" 68 | 69 | cpu_high_limit = "70" 70 | cpu_low_limit = "20" 71 | memory_high_limit = "70" 72 | memory_low_limit = "20" 73 | } 74 | -------------------------------------------------------------------------------- /example/rabbit.auto.tfvars: -------------------------------------------------------------------------------- 1 | ## ssh key registered on aws and used to connect to ec2 instances 2 | ssh_key_name = "Insert your ssh key name here" 3 | 4 | ## Disk 5 | root_volume_size = 50 # / 6 | rabbit_volume_size = 50 # /var/lib/rabbitmq 7 | instance_ebs_optimized = false 8 | 9 | ## AMI 10 | # Note : AMI are region-related make sure the AMI you choose is available in your region 11 | # https://cloud-images.ubuntu.com/locator/ec2/ 12 | image_id = "insert ubuntu ami related to your region" 13 | 14 | # Manager 15 | # If you don't have a private VPN connection configured set this to true so you can access your cluster 16 | associate_public_ip_address = false 17 | instance_type = "t3.medium" 18 | desired_capacity = 3 19 | 20 | # To bind the manager together, Rabbitmq uses the Erlang cookie so it knows they can join the cluster 21 | erl_secret_cookie = "a random secret key" 22 | 23 | # As we use the rabbit_peer_discovery_aws, we need credentials that can inspect ec2 or asg groups 24 | # https://www.rabbitmq.com/cluster-formation.html#peer-discovery-aw 25 | aws_access_key = "" 26 | -------------------------------------------------------------------------------- /example/settings.auto.tfvars: -------------------------------------------------------------------------------- 1 | # Your aws profile setup in ~/.aws/credentials 2 | profile = "default" 3 | region = "" 4 | 5 | # Id of the VPC you want to use 6 | vpc_id = "" 7 | 8 | # The following settings are based on how we do things @Citizenplane 9 | # You may choose whatever suits you as environment name 10 | 11 | # 1 - admin 12 | # 2 - production 13 | # 3 - pre-production 14 | # 4 - development 15 | 16 | environment = "admin" 17 | 18 | # Unique name of the selected environment 19 | cluster_name = "ft_rabbitmq_cluster" 20 | 21 | # This name must be unique as it will be used to create the egress dns record ! 22 | # make sure no other environment is using it 23 | # eg: test ==> test.domain.com 24 | cluster_fqdn = "test" 25 | 26 | 27 | # Assuming you have a route53 managed domain 28 | domain_name = "domain.com" 29 | certificate_arn = "" 30 | 31 | ## Network 32 | 33 | # The ids of your already existings subnets by availability zones 34 | subnet_ids = ["zoneA-id", "zoneB-id", "zoneC-id"] 35 | 36 | # You define the CIDR block that can reach your private ip in your VPC 37 | # Don't forget to include your EC2 instances 38 | # Any Network Interface that may need to access this cluster ECR ELB ALB ..... 39 | ingress_private_cidr_blocks = [ 40 | "192.x.x.x/24", 41 | "10.x.x.x/22", 42 | "172.x.x.x/16" 43 | ] 44 | 45 | # A set of Public IPs that can access the cluster from oustide your VPC 46 | # For instance, these will be used to restrict the Rabbitmq management web interface access 47 | ingress_public_cidr_blocks = [ 48 | "88.x.x.x/32", 49 | "195.x.x.x/32" 50 | ] 51 | 52 | # This is egress only settings for traffic going oustide your VPC. You may not want your cluster 53 | # to be able to reach any ip from oustide your network 54 | internet_public_cidr_blocks = [ 55 | "0.0.0.0/0" 56 | ] 57 | 58 | # Subnets Zone where the ASG will create your EC2 instances. These may differ from the subnet_ids of the ALB 59 | # But be careful that your routing table is correctly configured to map traffic from one subnet to another 60 | external_subnets = [ 61 | "zoneA-id", 62 | "zoneB-id", 63 | "zoneC-id" 64 | ] 65 | -------------------------------------------------------------------------------- /example/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ssh_key_name" { 2 | description = "The aws ssh key name used to connect to any ec2 instances" 3 | } 4 | 5 | variable "profile" { 6 | description = "aws local profile to use" 7 | } 8 | 9 | variable "environment" { 10 | description = "type of environment you are deploying (eg: dev, prod, staging)" 11 | } 12 | 13 | variable "cluster_name" { 14 | description = "Name of your deployment (eg: CitizenPlane)" 15 | } 16 | 17 | variable "desired_capacity" { 18 | description = "Default size of your manager swarm (1, 3, 5)" 19 | } 20 | 21 | variable "root_volume_size" { 22 | description = "Size of the filesystem mounted on `/`" 23 | } 24 | 25 | variable "rabbit_volume_size" { 26 | description = "Size of the docker filesystem mount point" 27 | } 28 | 29 | variable "image_id" { 30 | description = "Aws ami to be used by ec2 instances" 31 | } 32 | 33 | variable "instance_ebs_optimized" { 34 | description = "Enable instance with optimized hard drive" 35 | } 36 | 37 | variable "associate_public_ip_address" { 38 | description = "Enable public ip on manager" 39 | } 40 | 41 | variable "az_count" { 42 | default = 3 43 | description = "availability zone number" 44 | } 45 | 46 | variable "instance_type" {} 47 | 48 | variable "erl_secret_cookie" { 49 | description = "Used by rabbitmq to join a cluster" 50 | } 51 | 52 | variable "aws_access_key" { 53 | description = "Used by rabbitmq to describe autoscaling group" 54 | } 55 | 56 | variable "aws_secret_key" { 57 | description = "Used by rabbitmq to describe autoscaling group" 58 | } 59 | 60 | variable "cluster_fqdn" { 61 | description = "a subdomain for your route53 dns" 62 | } 63 | 64 | variable "ingress_private_cidr_blocks" { 65 | type = "list" 66 | } 67 | 68 | variable "ingress_public_cidr_blocks" { 69 | type = "list" 70 | } 71 | 72 | variable "internet_public_cidr_blocks" { 73 | type = "list" 74 | } 75 | 76 | variable "external_subnets" { 77 | description = "A list of one or more availability zones for the ASG" 78 | type = "list" 79 | } 80 | 81 | variable "vpc_id" {} 82 | variable "region" {} 83 | 84 | variable "certificate_arn" {} 85 | 86 | variable "subnet_ids" { 87 | type = "list" 88 | } 89 | 90 | variable "domain_name" {} 91 | -------------------------------------------------------------------------------- /iam.tf: -------------------------------------------------------------------------------- 1 | # Policies 2 | resource "aws_iam_role" "ProxyRole" { 3 | name = "${var.name}-${var.environment}-ProxyRole" 4 | assume_role_policy = file("${path.module}/policies/ProxyRole.json") 5 | } 6 | 7 | resource "aws_iam_instance_profile" "ProxyInstanceProfile" { 8 | name = "${var.name}-${var.environment}-ProxyInstanceProfile" 9 | role = aws_iam_role.ProxyRole.name 10 | } 11 | 12 | resource "aws_iam_role_policy" "ProxyPolicies" { 13 | name = "${var.name}-${var.environment}-ProxyPolicies" 14 | policy = file("${path.module}/policies/ProxyPolicies.json") 15 | role = aws_iam_role.ProxyRole.name 16 | } 17 | -------------------------------------------------------------------------------- /output.tf: -------------------------------------------------------------------------------- 1 | # Security used by the nodes and share to the load balencer 2 | output "security_group_id" { 3 | value = aws_security_group.lb-external.id 4 | } 5 | 6 | # Used by attachment target group 7 | output "autoscaling_group" { 8 | value = aws_autoscaling_group.rabbit-node.id 9 | } 10 | -------------------------------------------------------------------------------- /policies/ProxyPolicies.json: -------------------------------------------------------------------------------- 1 | { 2 | "Statement": [ 3 | { 4 | "Action": "elasticloadbalancing:*", 5 | "Effect": "Allow", 6 | "Resource": "*" 7 | } 8 | ], 9 | "Version": "2012-10-17" 10 | } 11 | -------------------------------------------------------------------------------- /policies/ProxyRole.json: -------------------------------------------------------------------------------- 1 | { 2 | "Statement": [ 3 | { 4 | "Action": [ 5 | "sts:AssumeRole" 6 | ], 7 | "Effect": "Allow", 8 | "Principal": { 9 | "Service": [ 10 | "ec2.amazonaws.com", 11 | "autoscaling.amazonaws.com" 12 | ] 13 | } 14 | } 15 | ], 16 | "Version": "2012-10-17" 17 | } 18 | -------------------------------------------------------------------------------- /rabbit-node.tf: -------------------------------------------------------------------------------- 1 | # Template use at launch to install docker 2 | # It will also lauch each docker container that are used to manage the state of our cluster 3 | # This is use to pass required settings from terraform template directly in the ED2 instance 4 | data "template_file" "rabbit-node" { 5 | template = file("${path.module}/user_data/rabbitmq.sh") 6 | 7 | vars = { 8 | AWS_REGION = var.region 9 | VPC_ID = var.vpc_id 10 | ERL_SECRET_COOKIE = var.erl_secret_cookie 11 | AWS_ACCESS_KEY = var.aws_access_key 12 | AWS_SECRET_KEY = var.aws_secret_key 13 | RABBITMQ_VERSION = var.rabbitmq_version 14 | ERLANG_VERSION = var.erlang_version 15 | CLUSTER_NAME = "${var.cluster_fqdn}-${var.name}-${var.environment}" 16 | } 17 | } 18 | 19 | resource "aws_launch_configuration" "rabbit-node" { 20 | name_prefix = "${var.name}-${var.environment}-rabbit-" 21 | 22 | image_id = var.image_id 23 | instance_type = var.instance_type 24 | ebs_optimized = var.instance_ebs_optimized 25 | 26 | iam_instance_profile = aws_iam_instance_profile.ProxyInstanceProfile.name 27 | key_name = var.ssh_key_name 28 | 29 | security_groups = [ 30 | aws_security_group.rabbit-cluster.id, 31 | aws_security_group.rabbit-node.id, 32 | ] 33 | 34 | # User Data is what's run at start from the template file previously rendered 35 | user_data = data.template_file.rabbit-node.rendered 36 | associate_public_ip_address = var.associate_public_ip_address 37 | 38 | # root 39 | root_block_device { 40 | volume_type = "gp2" 41 | volume_size = var.root_volume_size 42 | } 43 | 44 | # rabbit 45 | ebs_block_device { 46 | device_name = "/dev/xvdcz" 47 | volume_type = "gp2" 48 | volume_size = var.rabbit_volume_size 49 | } 50 | 51 | lifecycle { 52 | create_before_destroy = true 53 | } 54 | } 55 | 56 | resource "aws_autoscaling_group" "rabbit-node" { 57 | name = "${var.name}-${var.environment}-rabbit" 58 | 59 | launch_configuration = aws_launch_configuration.rabbit-node.name 60 | vpc_zone_identifier = var.external_subnets 61 | min_size = var.autoscaling_min_size 62 | max_size = var.autoscaling_max_size 63 | desired_capacity = var.desired_capacity 64 | termination_policies = ["OldestLaunchConfiguration", "Default"] 65 | 66 | health_check_type = "EC2" 67 | health_check_grace_period = 300 68 | 69 | tag { 70 | key = "Name" 71 | value = "${var.name}-${var.environment}-rabbit" 72 | propagate_at_launch = true 73 | } 74 | 75 | tag { 76 | key = "Cluster" 77 | value = "${var.name}-${var.environment}-cluster" 78 | propagate_at_launch = true 79 | } 80 | 81 | tag { 82 | key = "Environment" 83 | value = var.environment 84 | propagate_at_launch = true 85 | } 86 | 87 | lifecycle { 88 | create_before_destroy = true 89 | } 90 | } 91 | 92 | resource "aws_autoscaling_policy" "rabbit-node-scale-up" { 93 | name = "${var.name}-${var.environment}-rabbit-node-up" 94 | scaling_adjustment = 1 95 | adjustment_type = "ChangeInCapacity" 96 | cooldown = 300 97 | autoscaling_group_name = aws_autoscaling_group.rabbit-node.name 98 | 99 | lifecycle { 100 | create_before_destroy = true 101 | } 102 | } 103 | 104 | resource "aws_autoscaling_policy" "rabbit-node-scale-down" { 105 | name = "${var.name}-${var.environment}-rabbit-node-down" 106 | scaling_adjustment = -1 107 | adjustment_type = "ChangeInCapacity" 108 | cooldown = 300 109 | autoscaling_group_name = aws_autoscaling_group.rabbit-node.name 110 | 111 | lifecycle { 112 | create_before_destroy = true 113 | } 114 | } 115 | 116 | resource "aws_autoscaling_lifecycle_hook" "rabbit-node-upgrade" { 117 | name = "${var.name}-${var.environment}-rabbit-node-upgrade-hook" 118 | autoscaling_group_name = aws_autoscaling_group.rabbit-node.name 119 | default_result = "CONTINUE" 120 | heartbeat_timeout = 2000 121 | lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" 122 | } 123 | -------------------------------------------------------------------------------- /rabbitmq-alb/dns.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "primary-private" { 2 | # private_zone = true 3 | name = var.domain_name 4 | } 5 | 6 | resource "aws_route53_record" "mgmt-internal" { 7 | zone_id = data.aws_route53_zone.primary-private.zone_id 8 | name = "${var.cluster_fqdn}.${var.domain_name}" 9 | type = "CNAME" 10 | ttl = "300" 11 | records = [aws_lb.lb_internal.dns_name] 12 | allow_overwrite = true 13 | } 14 | 15 | resource "aws_route53_record" "rabbit-internal" { 16 | zone_id = data.aws_route53_zone.primary-private.zone_id 17 | name = "service-${var.cluster_fqdn}.${var.domain_name}" 18 | type = "CNAME" 19 | ttl = "300" 20 | records = [aws_lb.lb_internal_net.dns_name] 21 | allow_overwrite = true 22 | } 23 | 24 | data "aws_route53_zone" "primary" { 25 | name = var.domain_name 26 | } 27 | 28 | resource "aws_route53_record" "mgmt" { 29 | zone_id = data.aws_route53_zone.primary.zone_id 30 | name = "${var.cluster_fqdn}.${var.domain_name}" 31 | type = "CNAME" 32 | ttl = "300" 33 | records = [aws_lb.lb_internal.dns_name] 34 | allow_overwrite = true 35 | } 36 | 37 | resource "aws_route53_record" "rabbit" { 38 | zone_id = data.aws_route53_zone.primary.zone_id 39 | name = "service-${var.cluster_fqdn}.${var.domain_name}" 40 | type = "CNAME" 41 | ttl = "300" 42 | records = [aws_lb.lb_internal_net.dns_name] 43 | allow_overwrite = true 44 | } 45 | -------------------------------------------------------------------------------- /rabbitmq-alb/internal.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "lb_internal" { 2 | name = "${var.name}-int" 3 | internal = false 4 | security_groups = [var.alb_security_group] 5 | subnets = var.subnet_ids 6 | 7 | enable_cross_zone_load_balancing = true 8 | enable_deletion_protection = false 9 | idle_timeout = 60 10 | 11 | tags = {} 12 | } 13 | 14 | resource "aws_lb" "lb_internal_net" { 15 | name = "${var.name}-net-int" 16 | internal = true 17 | load_balancer_type = "network" 18 | subnets = var.subnet_ids 19 | 20 | enable_cross_zone_load_balancing = true 21 | enable_deletion_protection = false 22 | idle_timeout = 60 23 | 24 | tags = {} 25 | } 26 | 27 | resource "aws_lb_listener" "mgmt_internal" { 28 | load_balancer_arn = aws_lb.lb_internal.arn 29 | port = "443" 30 | protocol = "HTTPS" 31 | 32 | ssl_policy = "ELBSecurityPolicy-2016-08" 33 | certificate_arn = var.certificate_arn 34 | 35 | default_action { 36 | type = "forward" 37 | target_group_arn = aws_lb_target_group.backend_mgmt_internal.arn 38 | } 39 | } 40 | 41 | resource "aws_lb_target_group" "backend_mgmt_internal" { 42 | name = "${var.name}-https-int" 43 | port = 15672 44 | protocol = "HTTP" 45 | vpc_id = var.vpc_id 46 | 47 | health_check { 48 | healthy_threshold = "2" 49 | unhealthy_threshold = "4" 50 | timeout = "2" 51 | interval = "30" 52 | port = "15672" 53 | path = "/api/aliveness-test/%2F" 54 | protocol = "HTTP" 55 | matcher = "401" 56 | } 57 | } 58 | 59 | resource "aws_autoscaling_attachment" "mgmt_https_internal-internal" { 60 | autoscaling_group_name = var.autoscaling_group 61 | alb_target_group_arn = aws_lb_target_group.backend_mgmt_internal.arn 62 | } 63 | 64 | resource "aws_lb_listener" "rabbitmq_internal" { 65 | load_balancer_arn = aws_lb.lb_internal_net.arn 66 | port = "5672" 67 | protocol = "TCP" 68 | 69 | default_action { 70 | type = "forward" 71 | target_group_arn = aws_lb_target_group.rabbitmq_internal.arn 72 | } 73 | } 74 | 75 | resource "aws_lb_target_group" "rabbitmq_internal" { 76 | name = "${var.name}-rabbit-int" 77 | port = 5672 78 | protocol = "TCP" 79 | vpc_id = var.vpc_id 80 | 81 | health_check { 82 | healthy_threshold = "2" 83 | unhealthy_threshold = "2" 84 | interval = "30" 85 | port = "5672" 86 | protocol = "TCP" 87 | } 88 | } 89 | 90 | resource "aws_autoscaling_attachment" "rabbitmq_internal-internal" { 91 | autoscaling_group_name = var.autoscaling_group 92 | alb_target_group_arn = aws_lb_target_group.rabbitmq_internal.arn 93 | } 94 | -------------------------------------------------------------------------------- /rabbitmq-alb/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables from provider 2 | 3 | variable "name" { 4 | description = "ELB name, e.g cdn" 5 | } 6 | 7 | variable "subnet_ids" { 8 | type = "list" 9 | description = "Comma separated list of subnet IDs" 10 | } 11 | 12 | variable "environment" { 13 | description = "Environment tag, e.g prod" 14 | } 15 | 16 | variable "internal" { 17 | description = "Is the ELB is internal? [boolean] " 18 | } 19 | 20 | variable "certificate_arn" { 21 | description = "Certificate for the current domain name" 22 | } 23 | 24 | variable "vpc_id" { 25 | description = "VPC to target for instance group" 26 | } 27 | 28 | variable "autoscaling_group" { 29 | description = "autoscaling group for target group" 30 | } 31 | 32 | variable "domain_name" { 33 | description = "domaine name for the alb dns record egress" 34 | } 35 | 36 | variable "cluster_fqdn" {} 37 | 38 | variable "alb_security_group" {} 39 | -------------------------------------------------------------------------------- /security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "lb-external" { 2 | name = "${var.name}-${var.environment}-lb-external" 3 | vpc_id = var.vpc_id 4 | description = "Allows traffic from and to the EC2 instances of the ${var.name} Rabbitmq LB from outside" 5 | 6 | ingress { 7 | from_port = 443 8 | to_port = 443 9 | protocol = "tcp" 10 | cidr_blocks = var.ingress_public_cidr_blocks 11 | } 12 | 13 | egress { 14 | from_port = 0 15 | to_port = 0 16 | protocol = -1 17 | cidr_blocks = var.internet_public_cidr_blocks 18 | } 19 | 20 | tags = { 21 | Name = "${var.name}-${var.environment}" 22 | Environment = var.environment 23 | } 24 | 25 | lifecycle { 26 | create_before_destroy = true 27 | } 28 | } 29 | 30 | resource "aws_security_group" "rabbit-cluster" { 31 | name = "${var.name}-${var.environment}-rabbit-cluster" 32 | vpc_id = var.vpc_id 33 | description = "Allows traffic from and to the EC2 instances of the ${var.name} Rabbit Cluster" 34 | 35 | ingress { 36 | from_port = 5672 37 | to_port = 5672 38 | protocol = "tcp" 39 | 40 | cidr_blocks = var.ingress_public_cidr_blocks 41 | } 42 | 43 | ingress { 44 | from_port = 15672 45 | to_port = 15672 46 | protocol = "tcp" 47 | cidr_blocks = var.ingress_public_cidr_blocks 48 | } 49 | 50 | egress { 51 | from_port = 0 52 | to_port = 0 53 | protocol = -1 54 | cidr_blocks = var.internet_public_cidr_blocks 55 | } 56 | 57 | tags = { 58 | Name = "${var.name}-${var.environment}" 59 | Environment = var.environment 60 | } 61 | 62 | lifecycle { 63 | create_before_destroy = true 64 | } 65 | } 66 | 67 | resource "aws_security_group" "rabbit-node" { 68 | name = "${var.name}-${var.environment}-rabbit-node" 69 | vpc_id = var.vpc_id 70 | description = "Allows traffic from and to the EC2 instances of the ${var.name} Rabbit Nodes" 71 | 72 | ingress { 73 | from_port = 4369 74 | to_port = 4369 75 | protocol = "tcp" 76 | cidr_blocks = var.ingress_private_cidr_blocks 77 | } 78 | 79 | ingress { 80 | from_port = 25672 81 | to_port = 25672 82 | protocol = "tcp" 83 | cidr_blocks = var.ingress_private_cidr_blocks 84 | } 85 | 86 | ingress { 87 | from_port = 35672 88 | to_port = 35682 89 | protocol = "tcp" 90 | cidr_blocks = var.ingress_private_cidr_blocks 91 | } 92 | 93 | ingress { 94 | from_port = 22 95 | to_port = 22 96 | protocol = "tcp" 97 | cidr_blocks = var.internet_public_cidr_blocks 98 | } 99 | 100 | egress { 101 | from_port = 0 102 | to_port = 0 103 | protocol = "icmp" 104 | cidr_blocks = var.internet_public_cidr_blocks 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /user_data/rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit on command errors 4 | set -o nounset # abort on unbound variable 5 | set -o pipefail # capture fail exit codes in piped commands 6 | 7 | # ---------------------------------------- 8 | # Mount EBS additional storage 9 | # ---------------------------------------- 10 | export MOUNT_POINT=/var/lib/rabbitmq 11 | 12 | INSTANCE_TYPE=$(wget -qO- http://169.254.169.254/latest/meta-data/instance-type | cut -d '.' -f1) 13 | [[ $INSTANCE_TYPE = "t2" ]] && EBS_NAME="xvdcz" || EBS_NAME="nvme" 14 | 15 | # If nitro based instances 16 | if [[ $EBS_NAME = "nvme" ]]; then 17 | # Test which block is the ebs added volume it's the one returning `data` 18 | # since it's not yet formated and mounted 19 | # disable failsafe pipefail here so IS_NOT_ROOT can return 0 20 | set +o pipefail 21 | IS_NOT_ROOT=$(file -s /dev/nvme0n1 | grep "data" | wc -l) 22 | set -o pipefail 23 | [[ $IS_NOT_ROOT = "1" ]] && EBS_NAME="nvme0n1" || EBS_NAME="nvme1n1" 24 | fi 25 | 26 | # Following AWS procadure (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html) 27 | 28 | mkfs -t xfs /dev/$EBS_NAME 29 | 30 | # Where you which to mount the volume after (e.g: /var/lib/docker) 31 | mkdir -p $MOUNT_POINT 32 | 33 | # Mount the formated volume 34 | mount /dev/$EBS_NAME $MOUNT_POINT 35 | 36 | # Device is mounted now we shall protect against losing the device after reboot 37 | 38 | EBS_UUID=$(blkid | grep $EBS_NAME | egrep '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}' -o) 39 | 40 | echo "UUID=$EBS_UUID $MOUNT_POINT xfs defaults,nofail 0 2" >> /etc/fstab 41 | 42 | # ---------------------------------------- 43 | # Setpu Rabbitmq Configuration 44 | # ---------------------------------------- 45 | 46 | export RANDOM_START=$(( ( RANDOM % 30 ) + 1 )) 47 | export AWS_REGION="${AWS_REGION}" 48 | export VPC_ID="${VPC_ID}" 49 | export ERL_SECRET_COOKIE="${ERL_SECRET_COOKIE}" 50 | export AWS_SECRET_KEY="${AWS_SECRET_KEY}" 51 | export AWS_ACCESS_KEY="${AWS_ACCESS_KEY}" 52 | export CLUSTER_NAME=${CLUSTER_NAME} 53 | export RABBITMQ_VERSION=${RABBITMQ_VERSION} 54 | export ERLANG_VERSION=${ERLANG_VERSION} 55 | 56 | mkdir -p /etc/rabbitmq 57 | 58 | echo -n $ERL_SECRET_COOKIE > /var/lib/rabbitmq/.erlang.cookie 59 | chmod 600 /var/lib/rabbitmq/.erlang.cookie 60 | 61 | cat << EndOfConfig >> /etc/rabbitmq/rabbitmq.conf 62 | ## 63 | ## Security, Access Control 64 | ## ============== 65 | ## 66 | 67 | loopback_users.guest = false 68 | 69 | ## Networking 70 | ## ==================== 71 | ## 72 | ## Related doc guide: https://rabbitmq.com/networking.html. 73 | ## 74 | ## By default, RabbitMQ will listen on all interfaces, using 75 | ## the standard (reserved) AMQP 0-9-1 and 1.0 port. 76 | ## 77 | 78 | listeners.tcp.default = 5672 79 | management.listener.port = 15672 80 | management.listener.ssl = false 81 | 82 | 83 | hipe_compile = false 84 | 85 | ## 86 | ## Clustering 87 | ## ===================== 88 | ## 89 | 90 | cluster_formation.peer_discovery_backend = rabbit_peer_discovery_aws 91 | cluster_formation.aws.region = ${AWS_REGION} 92 | cluster_formation.aws.access_key_id = ${AWS_ACCESS_KEY} 93 | cluster_formation.aws.secret_key = ${AWS_SECRET_KEY} 94 | cluster_formation.aws.use_autoscaling_group = true 95 | 96 | 97 | EndOfConfig 98 | 99 | RABBITMQ_PLUGINS_FOLDER="/usr/lib/rabbitmq/plugins" 100 | 101 | mkdir -p $RABBITMQ_PLUGINS_FOLDER 102 | 103 | wget "https://github.com/noxdafox/rabbitmq-message-deduplication/releases/download/0.4.2/elixir-1.8.2.ez" -O "$RABBITMQ_PLUGINS_FOLDER/elixir-1.8.2.ez" 104 | wget "https://github.com/noxdafox/rabbitmq-message-deduplication/releases/download/0.4.2/rabbitmq_message_deduplication-0.4.2.ez" -O "$RABBITMQ_PLUGINS_FOLDER/rabbitmq_message_deduplication-0.4.2.ez" 105 | wget "https://github.com/Ayanda-D/rabbitmq-queue-master-balancer/releases/download/v0.0.5/rabbitmq_queue_master_balancer-0.0.5.ez" -O "$RABBITMQ_PLUGINS_FOLDER/rabbitmq_queue_master_balancer-0.0.5.ez" 106 | 107 | RABBITMQ_PLUGINS="[rabbitmq_shovel,rabbitmq_shovel_management,rabbitmq_management,rabbitmq_peer_discovery_aws,rabbitmq_queue_master_balancer,rabbitmq_tracing,rabbitmq_message_deduplication]." 108 | 109 | echo $RABBITMQ_PLUGINS > /etc/rabbitmq/enabled_plugins 110 | 111 | # ---------------------------------------- 112 | # Install Rabbitmq 113 | # ---------------------------------------- 114 | ## The configuration bellow was inspired by the official rabbtimq instalation guide. 115 | ## Link: https://www.rabbitmq.com/install-debian.html#apt-bintray 116 | 117 | apt-get update -y 118 | 119 | ## Install prerequisites 120 | apt-get install curl gnupg -y 121 | 122 | ## Install RabbitMQ signing key 123 | curl -fsSL https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc | apt-key add - 124 | 125 | ## Add Bintray repositories that provision latest RabbitMQ and Erlang 126 | tee /etc/apt/sources.list.d/bintray.rabbitmq.list <