├── .gitignore
├── .pre-commit-config.yaml
├── .terraform.lock.hcl
├── LICENSE
├── README.md
├── alb.tf
├── backend.tf
├── data.tf
├── docs-resources
│   ├── rabbitmq-init-process.png
│   └── rabbitmq_blueprint.png
├── efs.tf
├── iam.tf
├── init.sh
├── locals.tf
├── main.tf
├── outputs.tf
├── packer
│   ├── ansible
│   │   └── builder.yml
│   ├── env.pkrvars.hcl.example
│   ├── init.pkr.hcl
│   ├── rabbit.pkr.hcl
│   └── variables.pkr.hcl
├── security-group.tf
├── terraform.tfvars.example
└── variables.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | packer/*.pkrvars.hcl
2 | *.tfvars
3 | .terraform
4 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/antonbabenko/pre-commit-terraform
3 | rev: v1.92.1
4 | hooks:
5 | - id: terraform_fmt
6 | - id: terraform_docs
7 | args:
8 | - "--args=--lockfile=false"
9 | - id: terraform_tflint
10 | args:
11 | - "--hook-config=--parallelism-ci-cpu-cores=1"
12 | - "--args=--only=terraform_deprecated_interpolation"
13 | - "--args=--only=terraform_deprecated_index"
14 | - "--args=--only=terraform_unused_declarations"
15 | - "--args=--only=terraform_comment_syntax"
16 | - "--args=--only=terraform_documented_outputs"
17 | - "--args=--only=terraform_documented_variables"
18 | - "--args=--only=terraform_typed_variables"
19 | - "--args=--only=terraform_module_pinned_source"
20 | - "--args=--only=terraform_naming_convention"
21 | - "--args=--only=terraform_required_version"
22 | - "--args=--only=terraform_required_providers"
23 | - "--args=--only=terraform_standard_module_structure"
24 | - "--args=--only=terraform_workspace_remote"
25 | - "--args=--only=terraform_unused_required_providers"
26 | - id: terraform_validate
27 | args:
28 | - "--hook-config=--parallelism-ci-cpu-cores=1"
29 | - repo: https://github.com/pre-commit/pre-commit-hooks
30 | rev: v4.6.0
31 | hooks:
32 | - id: check-merge-conflict
33 | - id: end-of-file-fixer
34 | - id: trailing-whitespace
35 | - id: mixed-line-ending
36 | args: [--fix=lf]
37 |
--------------------------------------------------------------------------------
/.terraform.lock.hcl:
--------------------------------------------------------------------------------
1 | # This file is maintained automatically by "terraform init".
2 | # Manual edits may be lost in future updates.
3 |
4 | provider "registry.terraform.io/hashicorp/aws" {
5 | version = "5.62.0"
6 | constraints = ">= 3.53.0"
7 | hashes = [
8 | "h1:8tevkFG+ea/sNZYiQ2GQ02hknPcWBukxkrpjRCodQC0=",
9 | "zh:1f366cbcda72fb123015439a42ab19f96e10ce4edb404273f4e1b7e06da20b73",
10 | "zh:25f098454a34b483279e0382b24b4f42e51c067222c6e797eda5d3ec33b9beb1",
11 | "zh:4b59d48b527e3cefd73f196853bfc265b3e1e57b55c1c8a2d12ff6e3534b4f07",
12 | "zh:7bb88c1ca95e2b3f0f1fe8636925133b9813fc5b137cc467ba6a233ddf4b360e",
13 | "zh:8a93dece40e816c92647e762839d0370e9cad2aa21dc4ca95baee9385f116459",
14 | "zh:8dfe82c55ab8f633c1e2a39c687e9ca8c892d1c2005bf5166ac396ce868ecd05",
15 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
16 | "zh:a754952d69b4860480d5207390e3ab42350c964dbca9a5ac0c6912dd24b4c11d",
17 | "zh:b2a4dbf4abee0e9ec18c5d323b99defdcd3c681f8c4306fb6e02cff7de038f85",
18 | "zh:b57d84be258b571c04271015f03858ab215768b82e47c11ecd86e789d577030a",
19 | "zh:be811b03289407c8d59e6b199bf16e6071165565ffe502148172d0886cf849c4",
20 | "zh:d4144c7366c840eff1ac15ba13d96063f798f0983d24053a832362033624fe6f",
21 | "zh:d88612856d453c4e10c49c76e4ef522b7d068b4f7c3e2e0b03dd74540986eecd",
22 | "zh:e8bd231a5d0786cc4aab8471bb6dabd5a5df1c598afda077a9f27987ada57b67",
23 | "zh:ffb40a66b4d000a8ee4c54227eeb998f887ad867419c3af7d3981587788de074",
24 | ]
25 | }
26 |
27 | provider "registry.terraform.io/hashicorp/template" {
28 | version = "2.2.0"
29 | constraints = "2.2.0"
30 | hashes = [
31 | "h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=",
32 | "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386",
33 | "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53",
34 | "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603",
35 | "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16",
36 | "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776",
37 | "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451",
38 | "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae",
39 | "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde",
40 | "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d",
41 | "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2",
42 | ]
43 | }
44 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 The terraform-docs Authors.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# RabbitMQ Cluster (EC2 version with Auto Scaling)

## Architecture

![blueprint rabbit](./docs-resources/rabbitmq_blueprint.png)

This architecture deliberately uses three Auto Scaling Groups, for the following reasons:

* We treat each availability zone **as the home of a single RabbitMQ node**, so if we want to scale out with more RabbitMQ nodes we would have to add availability zones.

* Each Auto Scaling Group takes care of one AZ and allows at most one node; we use Auto Scaling only for its ability to **heal** nodes after an EC2-level failure (we could also do this at the NLB level, but for now we limit it to EC2).

* The services running on each instance rely on the [Quorum Consensus Protocol](https://www.consul.io/docs/architecture/consensus), and since we start with an initial availability of 3 nodes, we can tolerate losing 1 availability zone at a time.

* The RabbitMQ nodes use [EFS](https://aws.amazon.com/pt/efs/) to persist the MNESIA data (the volatile state holding persisted queues and management configuration, for example), so when a node has to be completely replaced, the new node takes over without losing that information.

* All queues are configured as "Quorum" queues for higher availability and greater resilience of the service; a sketch of declaring one is shown right after this list.

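As a minimal sketch (the queue name `orders` is hypothetical, and this assumes the management API is reachable on `localhost:15672` with the admin credentials created by `init.sh`), a quorum queue can be declared through the management HTTP API:

```bash
# declare a durable quorum queue named "orders" on the default vhost ("/" is URL-encoded as %2f)
curl -u admin:"$ADMIN_PASSWORD" -X PUT \
  -H 'content-type: application/json' \
  -d '{"durable": true, "arguments": {"x-queue-type": "quorum"}}' \
  http://localhost:15672/api/queues/%2f/orders
```
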
## Base Image

The recipe for the base AMI is available in the [infra-provisioning repo](https://github.com/belezanaweb/infra-provisioning/tree/master/packer/rabbit-cluster/).

We use Packer to build the image; a few points worth highlighting (a build sketch follows this list):

* We use Amazon Linux 2 to take full advantage of its security features and [SSM](https://docs.aws.amazon.com/pt_br/systems-manager/latest/userguide/ssm-agent.html) support.
* We use the repo as the base for installing the packages RabbitMQ needs.
* TODO: improve the hardening.

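A minimal build sketch using the `packer/` directory of this repo (assuming AWS credentials are already configured and the example variable file has been filled in):

```bash
cd packer
cp env.pkrvars.hcl.example env.pkrvars.hcl   # fill in vpc_id, subnet_id and region
packer init .
packer build -var-file=env.pkrvars.hcl .
```
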
## Initialization Process

The initialization process follows the flow diagrammed below:

![init](./docs-resources/rabbitmq-init-process.png)

All the code is available [here](./init.sh); the template is rendered with the variables defined at resource-creation time and stored as user data in the Auto Scaling Group's launch template.

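To check what was actually rendered onto a given instance, one option is to read the user data back from the instance metadata service (a sketch assuming IMDSv1 is allowed, as configured in the launch template):

```bash
# run on the instance itself, e.g. inside an SSM session
curl -s http://169.254.169.254/latest/user-data | head -n 20
```
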
The initialization process was designed entirely around resilience, so it handles the following scenarios:

* A node failure that requires restoring an AZ whose node became unhealthy.
* Instance rotation to swap the AMI or update plugins/components.
* A stuck process that requires the drastic action of removing a node from the cluster so a healthy one can take its place.

If configuration already applied to the cluster needs to be wiped, a more drastic action is required (a sketch of the first option follows this list):

* Enter one of the nodes through SSM, clear the contents of the `/mnt/efs` directory, and then rotate the nodes.
* Run `terraform destroy`, tearing down the whole architecture and then recreating it, so the EFS holding the persisted data goes away with it.

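A minimal sketch of the first option (the instance id and Auto Scaling Group name below are hypothetical; this module names each ASG as `<name>-<az>`):

```bash
# open a shell on one of the nodes through SSM
aws ssm start-session --target i-0123456789abcdef0

# inside the session, wipe the persisted MNESIA data
sudo rm -rf /mnt/efs/*

# then rotate each node, for example with an instance refresh per ASG
aws autoscaling start-instance-refresh \
  --auto-scaling-group-name rabbitmq-cluster-us-east-1a
```
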
## Why on earth is Consul in the middle of all this?

Answer: the voices in my head told me to!

![](

Jokes aside, here is the plausible explanation behind the rationale.

### Problem: issues with AWS discovery

We initially started with AWS discovery to resolve the instances, but ran into a problem during resilience tests. Because the private IP is part of the DNS name handed out by DHCP on AWS's private network, the machines always joined with a name like `rabbit@<private-ip>`, where the internal IP becomes part of the instance's name on the network. As a result:

* If we restarted an instance in zone `a`, instead of recovering the MNESIA data available on the `efs`, it created another empty directory for a new host named after the new `rabbit@<private-ip>`.
* Besides losing automated MNESIA recovery, the cluster, instead of going back to showing 3 healthy nodes, showed a new `rabbit@<new-ip>` node joining while the old `rabbit@<old-ip>` node was reported as lost.
* Try to imagine how pretty that view looks after 3 restarts with node rotation: the nodes view would show `9` entries, 3 of them `online` and 6 of them `down`.
* The only HA guarantee left would be the queues configured as `quorum`; we would not have HA for shovel configuration, for example.

Given this scenario, we found 3 possible solutions:

* Take manual action after each restart: have the RabbitMQ init not start the service and move MNESIA into the correct directory by hand, so part of the process would have to be manual.
* Intervene in AWS's DHCP options and find a way to give the instances' private DNS names "static" control.
* Find another discovery mechanism that solves this DNS problem by guaranteeing that an AZ's DNS name is always the same and is defined at initialization.

We considered the last option the most advantageous: it is the least invasive in terms of structural change (we only change things inside the scope of this solution), and since the Consul process is very simple (a single binary with no dependency on any specific library), we found no blockers to rolling it out.

The weak point is that, just like RabbitMQ, Consul is subject to the [Quorum Consensus Protocol](https://www.consul.io/docs/architecture/consensus), so we cannot lose more than one node of the cluster at the same time; considering we could not afford that luxury before either, this choice did not really cost us anything.

It would be better if Consul ran externally, but that would cost more and be harder to maintain, and since it is only used by this solution for now, we will keep it this way.

### References on Consul

* [This article on service discovery](https://medium.com/trainingcenter/entendendo-orquestradores-parte-1-service-discovery-4219d604335c)
* [Learn Hashicorp Consul](https://learn.hashicorp.com/tutorials/consul/get-started-service-discovery)

The only difference between what was done here and the literature is that, in addition to the changes suggested in the [dnsmasq setup](https://learn.hashicorp.com/tutorials/consul/dns-forwarding#dnsmasq-setup) steps, this configuration was added to `dhclient.conf`:

```
prepend domain-name-servers 127.0.0.1;
append domain-name node.<domain>;
append domain-name service.<domain>;
append domain-name <domain>;
```

where `<domain>` is the domain assumed by Consul's discovery (default: `consul`).

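To verify that discovery is working on a node, the Consul DNS interface can be queried directly (the service name `rabbitmq` comes from `cluster_formation.consul.svc` in `init.sh`, and `consul` is the default domain):

```bash
# ask the local Consul agent on its DNS port
dig @127.0.0.1 -p 8600 rabbitmq.service.consul +short

# after the DNS forwarding configuration above, a plain lookup should also resolve
dig rabbitmq.service.consul +short
```
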
--------------------------------------------------------------------------------
/alb.tf:
--------------------------------------------------------------------------------
1 | # Create a new internal network load balancer
2 | resource "aws_lb" "this" {
3 | name = local.lb_name
4 | internal = var.is_lb_internal
5 | load_balancer_type = "network"
6 | subnets = var.alb_subnet_ids
7 | enable_cross_zone_load_balancing = true
8 | enable_deletion_protection = false
9 |   security_groups                  = concat(tolist(var.additional_sg_lb_ids), [aws_security_group.lb.id])
10 |
11 | tags = merge(var.default_tags, {
12 | Account = local.account_alias
13 | Name = local.lb_name
14 | })
15 | }
16 |
17 | # Create Target groups
18 | resource "aws_lb_target_group" "this" {
19 | for_each = local.rabbit_service_ports
20 | name = "tg-${local.lb_name}-${each.key}"
21 | port = each.value.port
22 | protocol = "TCP"
23 | vpc_id = local.vpc_id
24 | health_check {
25 | enabled = true
26 | protocol = "TCP"
27 | }
28 | }
29 |
30 |
31 | # Create Listeners
32 | resource "aws_alb_listener" "this" {
33 | for_each = local.nlb_listener_ports
34 | load_balancer_arn = aws_lb.this.arn
35 | port = each.value.port
36 | protocol = each.value.secure ? "TLS" : "TCP"
37 |
38 | certificate_arn = each.value.certificate_arn
39 | ssl_policy = each.value.ssl_policy
40 |
41 | default_action {
42 | type = "forward"
43 | target_group_arn = aws_lb_target_group.this[each.value.service_port].arn
44 | }
45 | depends_on = [aws_lb_target_group.this]
46 | }
47 |
48 | resource "aws_route53_record" "internal_cname" {
49 | provider = aws.route53_account
50 | zone_id = data.aws_route53_zone.hosted_zone.id
51 | name = "${var.name}.${var.domain_name}"
52 | type = "A"
53 |
54 | alias {
55 | name = aws_lb.this.dns_name
56 | zone_id = aws_lb.this.zone_id
57 | evaluate_target_health = true
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/backend.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 |
6 | provider "aws" {
7 | region = "us-east-1"
8 | alias = "route53_account"
9 | }
10 |
11 |
12 | terraform {
13 | required_version = ">= 1.5"
14 | required_providers {
15 | aws = {
16 | source = "hashicorp/aws"
17 | version = ">= 3.53"
18 | configuration_aliases = [aws.route53_account]
19 | }
20 | template = {
21 | source = "hashicorp/template"
22 | version = "2.2.0"
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/data.tf:
--------------------------------------------------------------------------------
1 | ###################################
2 | ## Data
3 | ###################################
4 | data "aws_iam_account_alias" "this" {}
5 |
6 | data "aws_iam_policy_document" "trust" {
7 | statement {
8 | actions = [
9 | "sts:AssumeRole"
10 | ]
11 | effect = "Allow"
12 | principals {
13 | identifiers = [
14 | "ec2.amazonaws.com"
15 | ]
16 | type = "Service"
17 | }
18 | }
19 | }
20 |
21 | data "aws_secretsmanager_secret" "this" {
22 | arn = var.secret_arn
23 | }
24 |
25 | data "aws_ami" "rabbitmq" {
26 | most_recent = true
27 | name_regex = local.ami_regex
28 | owners = ["self"]
29 | }
30 |
31 | data "aws_vpc" "this" {
32 | id = var.vpc_id
33 | }
34 |
35 | data "aws_subnet" "instances" {
36 | for_each = toset(var.instances_subnet_ids)
37 | id = each.value
38 | }
39 |
40 | data "aws_route53_zone" "hosted_zone" {
41 | provider = aws.route53_account
42 | name = var.domain_name
43 | }
44 |
45 |
46 |
47 |
48 | data "template_file" "init" {
49 | for_each = local.az_with_context
50 |   template = file("${path.module}/init.sh")
51 | vars = {
52 | region = var.region
53 | domain = var.consul_domain
54 | cluster_name = var.cluster_name
55 | name = each.value.name
56 | admin_username = "admin"
57 | monitor_username = "monitor"
58 | federation_username = "federation"
59 | tag_key_app = "App"
60 | tag_app = var.name
61 | secret_name = data.aws_secretsmanager_secret.this.id
62 | secret_id_admin_password = local.secret_ids.admin_password
63 | secret_id_terraform_password = local.secret_ids.terraform_password
64 | secret_id_monitor_password = local.secret_ids.monitor_password
65 | secret_id_federation_password = local.secret_ids.federation_password
66 | secret_id_cookie_string = local.secret_ids.cookie_string
67 | secret_id_newrelic_key = local.secret_ids.newrelic_key
68 | filesystem_id = aws_efs_file_system.rabbit_data.id
69 | ecr_repo_dns = local.ecr_repo_dns
70 | ecr_region = local.ecr_region
71 | rabbitmq_image_url = var.rabbit_image_url
72 | rabbitmq_delayedmessage_version = var.rabbit_delayedmessage_version
73 | }
74 | }
75 |
76 | data "aws_iam_policy_document" "secret_manager_ronly_crypt" {
77 | for_each = local.has_kms
78 | statement {
79 | effect = "Allow"
80 | actions = [
81 | "kms:Decrypt"
82 | ]
83 | resources = [
84 | var.secret_kms_arn
85 | ]
86 | }
87 | statement {
88 | effect = "Allow"
89 | actions = [
90 | "secretsmanager:GetSecretValue"
91 | ]
92 | resources = [
93 | var.secret_arn
94 | ]
95 | }
96 | }
97 |
98 | data "aws_iam_policy_document" "secret_manager_ronly" {
99 | for_each = local.does_not_have_kms
100 | statement {
101 | effect = "Allow"
102 | actions = [
103 | "secretsmanager:GetSecretValue"
104 | ]
105 | resources = [
106 | var.secret_arn
107 | ]
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/docs-resources/rabbitmq-init-process.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/claytonsilva/terraform-aws-rabbitmq-ec2/0faf5cb931b1e6ed14fb6639cf08c753f22abd11/docs-resources/rabbitmq-init-process.png
--------------------------------------------------------------------------------
/docs-resources/rabbitmq_blueprint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/claytonsilva/terraform-aws-rabbitmq-ec2/0faf5cb931b1e6ed14fb6639cf08c753f22abd11/docs-resources/rabbitmq_blueprint.png
--------------------------------------------------------------------------------
/efs.tf:
--------------------------------------------------------------------------------
1 | #tfsec:ignore:AWS048
2 | resource "aws_efs_file_system" "rabbit_data" {
3 | tags = var.default_tags
4 | }
5 |
6 | resource "aws_efs_mount_target" "alpha" {
7 | for_each = local.az_with_context
8 | file_system_id = aws_efs_file_system.rabbit_data.id
9 | subnet_id = each.value.subnet_ids[0]
10 | security_groups = [aws_security_group.efs.id]
11 | }
12 |
13 | resource "aws_efs_backup_policy" "this" {
14 | file_system_id = aws_efs_file_system.rabbit_data.id
15 |
16 | backup_policy {
17 | status = "ENABLED"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/iam.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "this" {
2 | name = local.role_name
3 | assume_role_policy = data.aws_iam_policy_document.trust.json
4 | tags = merge(
5 | {
6 | Name = local.role_name
7 | },
8 | var.default_tags
9 | )
10 | }
11 |
12 | resource "aws_iam_role_policy" "secret_manager_ronly" {
13 | role = aws_iam_role.this.name
14 | name = "secret_manager_ronly"
15 | policy = local.secret_policy_document
16 | }
17 |
18 | resource "aws_iam_role_policy_attachment" "ssm" {
19 | policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
20 | role = aws_iam_role.this.name
21 | }
22 |
23 | resource "aws_iam_role_policy_attachment" "cloudwatch_logs" {
24 | policy_arn = "arn:aws:iam::350085234395:policy/CloudWatchLogsRole"
25 | role = aws_iam_role.this.name
26 | }
27 |
28 | resource "aws_iam_role_policy_attachment" "ec2ronly" {
29 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess"
30 | role = aws_iam_role.this.name
31 | }
32 |
33 | resource "aws_iam_role_policy_attachment" "ecrronly" {
34 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
35 | role = aws_iam_role.this.name
36 | }
37 |
38 | resource "aws_iam_instance_profile" "this" {
39 | name = local.role_name
40 | role = aws_iam_role.this.name
41 | }
42 |
--------------------------------------------------------------------------------
/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -xe
2 |
3 | echo "Become root user"
4 |
5 | sudo su -
6 |
7 | hostnamectl set-hostname ${name}
8 |
9 | ####
10 | # EFS Configuration
11 | ####
12 | mkdir -p /mnt/efs
13 | mount -t efs ${filesystem_id} /mnt/efs
14 |
15 | DIR="/mnt/efs/${name}"
16 | if [ ! -d "$DIR" ]; then
17 |   # Create the directory if $DIR does not exist. #
18 |   echo "directory $DIR does not exist, creating it"
19 | mkdir -p $DIR
20 | chown -R 999:999 $DIR
21 | fi
22 |
23 | echo "${filesystem_id}.efs.${region}.amazonaws.com:/ /mnt/efs nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev 0 0" >>/etc/fstab
24 |
25 | ####
26 | # AWS Secret manager configuration
27 | # Get secret data and store in variables
28 | ####
29 | ADMIN_PASSWORD=$(AWS_DEFAULT_REGION=${region} aws secretsmanager get-secret-value --secret-id ${secret_name} | jq -r '.SecretString' | jq -r '."${secret_id_admin_password}"')
30 | FEDERATION_PASSWORD=$(AWS_DEFAULT_REGION=${region} aws secretsmanager get-secret-value --secret-id ${secret_name} | jq -r '.SecretString' | jq -r '."${secret_id_federation_password}"')
31 | MONITOR_PASSWORD=$(AWS_DEFAULT_REGION=${region} aws secretsmanager get-secret-value --secret-id ${secret_name} | jq -r '.SecretString' | jq -r '."${secret_id_monitor_password}"')
32 | COOKIE_STRING=$(AWS_DEFAULT_REGION=${region} aws secretsmanager get-secret-value --secret-id ${secret_name} | jq -r '.SecretString' | jq -r '."${secret_id_cookie_string}"')
33 |
34 | ######################################################################
35 | ## consul configuration
36 | #
37 | # consul is a service discovery planned to reduce problem with privateDNS used by AWS discovery
38 | # on rabbitmq
39 | #
40 | ######################################################################
41 |
42 | # basic directories
43 | mkdir -p /opt/consul/data
44 | mkdir -p /etc/consul.d
45 |
46 | LAN_ADDRESS=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
47 |
48 | # files from consul configuration
49 | echo -e "LAN_NODENAME=${name}
50 | LAN_ADDRESS=$LAN_ADDRESS" >/etc/environment
51 |
52 | echo -e "{
53 | \"server\": true,
54 | \"bootstrap_expect\": 3,
55 | \"acl_default_policy\": \"allow\",
56 | \"addresses\": {
57 | \"dns\": \"0.0.0.0\",
58 | \"grpc\": \"0.0.0.0\",
59 | \"http\": \"0.0.0.0\",
60 | \"https\": \"0.0.0.0\"
61 | },
62 | \"client_addr\": \"0.0.0.0\",
63 | \"connect\": {
64 | \"enabled\": false
65 | },
66 | \"data_dir\": \"/opt/consul/data\",
67 | \"datacenter\": \"${cluster_name}\",
68 | \"disable_update_check\": true,
69 | \"domain\": \"${domain}\",
70 | \"enable_script_checks\": true,
71 | \"enable_syslog\": true,
72 | \"log_level\": \"INFO\",
73 | \"performance\": {
74 | \"leave_drain_time\": \"5s\",
75 | \"raft_multiplier\": 1,
76 | \"rpc_hold_timeout\": \"7s\"
77 | },
78 | \"ports\": {
79 | \"dns\": 8600,
80 | \"http\": 8500,
81 | \"server\": 8300
82 | },
83 | \"raft_protocol\": 3,
84 | \"syslog_facility\": \"local0\",
85 | \"ui_config\": {
86 | \"enabled\": true
87 | }
88 | }" >/etc/consul.d/base.json
89 |
90 | echo -e "{
91 | \"retry_interval\": \"30s\",
92 | \"retry_interval_wan\": \"30s\",
93 | \"retry_join\": [\"provider=aws tag_key=${tag_key_app} tag_value=${tag_app}\"],
94 | \"retry_max\": 0,
95 | \"retry_max_wan\": 0
96 | }" >/etc/consul.d/join.json
97 |
98 | echo -e "[Unit]
99 | Description=\"HashiCorp Consul\"
100 | Documentation=https://www.consul.io/
101 | Requires=systemd-networkd.service
102 | After=systemd-networkd.service
103 | ConditionFileNotEmpty=/etc/consul.d/join.json
104 |
105 | [Service]
106 | Type=simple
107 | EnvironmentFile=/etc/environment
108 | ExecStart=/usr/bin/consul agent -config-dir=/etc/consul.d/ -advertise-wan \$LAN_ADDRESS -bind \$LAN_ADDRESS -advertise \$LAN_ADDRESS -node \$LAN_NODENAME
109 | ExecReload=/usr/bin/consul reload
110 | Restart=on-failure
111 | KillMode=process
112 | LimitNOFILE=65536" >/etc/systemd/system/consul.service
113 |
114 | ### systemd-resolved configuration
115 | iptables --table nat --append OUTPUT --destination localhost --protocol udp --match udp --dport 53 --jump REDIRECT --to-ports 8600
116 | iptables --table nat --append OUTPUT --destination localhost --protocol tcp --match tcp --dport 53 --jump REDIRECT --to-ports 8600
117 |
118 | echo -e "[Resolve]
119 | DNS=127.0.0.1
120 | DNSSEC=false
121 | Domains=${domain} node.${domain} service.${domain}" >>/etc/systemd/resolved.conf
122 | systemctl restart systemd-resolved
123 |
124 | echo "Enabling Consul"
125 | systemctl daemon-reload
126 | systemctl enable consul
127 |
128 | echo "Starting Consul"
129 | systemctl start consul
130 |
131 | ######################################################################
132 | ## rabbitmq configuration
133 | ##
134 | ######################################################################
135 |
136 | mkdir -p /etc/rabbitmq
137 |
138 | echo -e "[Install]
139 | WantedBy=multi-user.target
140 |
141 | [Unit]
142 | Description=Rabbitmq Container
143 | After=docker.service consul.service
144 | Requires=docker.service consul.service
145 |
146 | [Service]
147 | EnvironmentFile=/etc/environment
148 | TimeoutStartSec=0
149 | Restart=always
150 | ExecStartPre=/usr/bin/bash -c \"aws ecr get-login-password --region ${ecr_region} | docker login --username AWS --password-stdin ${ecr_repo_dns}\"
151 | ExecStartPre=-/usr/bin/docker stop %n
152 | ExecStartPre=-/usr/bin/docker rm %n
153 | ExecStartPre=/usr/bin/docker pull ${rabbitmq_image_url}
154 | ExecStart=/usr/bin/docker run --rm --name %n --net=host -v /var/log/rabbitmq:/var/log/rabbitmq -v /var/lib/rabbitmq:/var/lib/rabbitmq -v /mnt/efs:/mnt/efs -v /etc/rabbitmq:/etc/rabbitmq -p 5672:5672 -p 15672:15672 --ulimit nofile=32768:32768 ${rabbitmq_image_url}
155 | ExecStartPost=/usr/bin/sleep 5
156 | [Install]
157 | WantedBy=multi-user.target" >/etc/systemd/system/rabbitmq-server.service
158 |
159 | echo "Create config file - rabbitmq.conf"
160 |
161 | echo -e "cluster_name = ${cluster_name}
162 | cluster_formation.peer_discovery_backend = consul
163 | cluster_formation.consul.host = localhost
164 | cluster_formation.consul.svc_addr_use_nodename = true
165 | cluster_formation.consul.use_longname = false
166 | cluster_formation.consul.svc_addr_auto = true
167 | cluster_formation.consul.port = 8500
168 | cluster_formation.consul.scheme = http
169 | cluster_formation.consul.svc = rabbitmq
170 | cluster_formation.consul.svc_ttl = 30
171 | cluster_formation.consul.deregister_after = 60
172 | management.tcp.port = 15672
173 | listeners.tcp.1 = 0.0.0.0:5672
174 | log.console = true
175 | log.console.level = info
176 | log.file = instance.log
177 | log.file.level = info
178 | log.file.formatter = json" >/etc/rabbitmq/rabbitmq.conf
179 |
180 | NODENAME=${name}
181 |
182 | echo -e "NODENAME=rabbit@${name}
183 | MNESIA_BASE=/mnt/efs/${name}
184 | PLUGINS_DIR=/opt/rabbitmq/plugins:/var/lib/rabbitmq/plugins
185 | MNESIA_DIR=/mnt/efs/${name}/node" >/etc/rabbitmq/rabbitmq-env.conf
186 |
187 | echo -e "[rabbitmq_peer_discovery_consul]." >/etc/rabbitmq/enabled_plugins
188 |
189 | ### install plugin packages and set their configuration
190 | echo "Config token file"
191 | mkdir -p /var/lib/rabbitmq
192 | mkdir -p /var/lib/rabbitmq/plugins
193 | echo $COOKIE_STRING >/var/lib/rabbitmq/.erlang.cookie
194 | wget -c https://github.com/rabbitmq/rabbitmq-delayed-message-exchange/releases/download/${rabbitmq_delayedmessage_version}/rabbitmq_delayed_message_exchange-${rabbitmq_delayedmessage_version}.ez -P /var/lib/rabbitmq/plugins
195 | chown -R 999:999 /var/lib/rabbitmq
196 | chown -R 999:999 /etc/rabbitmq
197 | chmod a-r /var/lib/rabbitmq/.erlang.cookie
198 | chmod u+r /var/lib/rabbitmq/.erlang.cookie
199 |
200 | ### add log folder
201 | mkdir -p /var/log/rabbitmq
202 | chown -R 999:999 /var/log/rabbitmq
203 |
204 | echo "Enabling Rabbitmq Server"
205 | systemctl enable rabbitmq-server
206 |
207 | echo "Starting RabbitMQ"
208 | systemctl start rabbitmq-server
209 |
210 | # wait for the node to finish starting before running the next commands
211 | alias rabbitmqctl="docker exec rabbitmq-server.service rabbitmqctl"
212 | RABBITMQCTL_CMD="docker exec rabbitmq-server.service rabbitmqctl"
213 | alias rabbitmq-plugins="docker exec rabbitmq-server.service rabbitmq-plugins"
214 | RABBITMQCTL_PLUGINS_CMD="docker exec rabbitmq-server.service rabbitmq-plugins"
215 |
216 | $RABBITMQCTL_CMD await_startup
217 |
218 | # enable plugins
219 | $RABBITMQCTL_PLUGINS_CMD enable rabbitmq_peer_discovery_consul rabbitmq_delayed_message_exchange rabbitmq_management rabbitmq_management_agent rabbitmq_shovel rabbitmq_shovel_management rabbitmq_top rabbitmq_tracing rabbitmq_web_dispatch rabbitmq_amqp1_0 rabbitmq_federation rabbitmq_federation_management
220 |
221 | EXISTS_RABBITMQ_ADMIN=$($RABBITMQCTL_CMD list_users --formatter json | jq -c '.[] | select(.user | contains("${admin_username}"))' | jq '.user')
222 |
223 | # Create admin user
224 | if [ "$EXISTS_RABBITMQ_ADMIN" == "" ]; then
225 | $RABBITMQCTL_CMD add_user ${admin_username} $ADMIN_PASSWORD
226 | $RABBITMQCTL_CMD set_user_tags ${admin_username} administrator
227 | $RABBITMQCTL_CMD set_permissions -p / ${admin_username} ".*" ".*" ".*"
228 | fi
229 |
230 | EXISTS_RABBITMQ_MONITOR=$($RABBITMQCTL_CMD list_users --formatter json | jq -c '.[] | select(.user | contains("${monitor_username}"))' | jq '.user')
231 |
232 | # Create monitor user
233 | if [ "$EXISTS_RABBITMQ_MONITOR" == "" ]; then
234 | $RABBITMQCTL_CMD add_user ${monitor_username} $MONITOR_PASSWORD
235 | $RABBITMQCTL_CMD set_user_tags ${monitor_username} monitoring
236 | $RABBITMQCTL_CMD set_permissions -p / ${monitor_username} ".*" ".*" ".*"
237 | fi
238 |
239 | EXISTS_RABBITMQ_FEDERATION=$($RABBITMQCTL_CMD list_users --formatter json | jq -c '.[] | select(.user | contains("${federation_username}"))' | jq '.user')
240 |
241 | # Create federation user
242 | if [ "$EXISTS_RABBITMQ_FEDERATION" == "" ]; then
243 | $RABBITMQCTL_CMD add_user ${federation_username} $FEDERATION_PASSWORD
244 | $RABBITMQCTL_CMD set_user_tags ${federation_username} administrator
245 | $RABBITMQCTL_CMD set_permissions -p / ${federation_username} ".*" ".*" ".*"
246 | fi
247 |
248 | ####
249 | # logrotate configuration
250 | ####
251 |
252 | echo -e "
253 | /var/log/rabbitmq/*.log {
254 | rotate 0
255 | daily
256 | size 50M
257 | maxsize 50M
258 | }" >/etc/logrotate.d/rabbitmq
259 |
260 | echo -e "
261 | @daily root logrotate /etc/logrotate.conf
262 | @daily root docker exec rabbitmq-server.service rabbitmqctl export_definitions \"/mnt/efs/${name}.json\"
263 | @daily root journalctl --vacuum-time=3days" >/etc/crontab
264 |
265 | # /etc/crontab is read directly by crond, so it does not need to be installed as a user crontab
266 |
267 | systemctl enable crond
268 | systemctl start crond
269 |
--------------------------------------------------------------------------------
/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 |
3 | account_alias = data.aws_iam_account_alias.this.account_alias
4 | lb_name = "nlb-${var.name}"
5 | az_with_context = {
6 | for k in data.aws_subnet.instances : k.availability_zone => {
7 | name = "${var.name}-${k.availability_zone}"
8 | subnet_ids = [k.id]
9 | }
10 | }
11 |
12 |   has_kms           = var.secret_kms_arn != "" ? toset(["this"]) : toset([])
13 |   does_not_have_kms = var.secret_kms_arn == "" ? toset(["this"]) : toset([])
14 |
15 |
16 | role_name = var.role_name == "" ? var.name : var.role_name
17 | key_name = var.key_name == "" ? null : var.key_name
18 | secret_policy_document = length(local.has_kms) > 0 ? data.aws_iam_policy_document.secret_manager_ronly_crypt["this"].json : data.aws_iam_policy_document.secret_manager_ronly["this"].json
19 |
20 | vpc_id = data.aws_vpc.this.id
21 | vpc_cidr = data.aws_vpc.this.cidr_block
22 |
23 | nlb_listener_ports = {
24 | "management" = { description = "Management HTTP", port = "80", secure = false, ssl_policy = null, certificate_arn = null, service_port = "consul" }
25 | "https" = { description = "HTTPS", port = "443", secure = true, ssl_policy = var.default_ssl_policy, certificate_arn = var.certificate_arn, service_port = "management" }
26 | "amqps" = { description = "Amqps Port", port = "5671", secure = true, ssl_policy = var.default_ssl_policy, certificate_arn = var.certificate_arn, service_port = "amqp" }
27 | }
28 |
29 | nlb_service_ports_with_azs = tomap({
30 | for az_service_port in flatten([
31 | for k, v in local.az_with_context : [
32 | for inner_k, inner_v in local.nlb_listener_ports : {
33 | id = "${k}-${inner_k}"
34 | zone_id = k
35 | service_port = inner_v.service_port
36 | }
37 | ]
38 | ]) : az_service_port.id => az_service_port
39 | })
40 |
41 | rabbit_service_ports = {
42 | "management" = { description = "Rabbitmq Management HTTP", port = "15672" }
43 | "amqp" = { description = "Amqp Port", port = "5672", }
44 | "amqps" = { description = "Amqps Port", port = "5671", }
45 | "consul" = { description = "Consul Default", port = "8500", }
46 | }
47 |
48 | ami_regex = var.ami_regex == "" ? "^${var.name}-" : var.ami_regex
49 |
50 | secret_ids = {
51 | admin_password = "admin-password"
52 | monitor_password = "monitor-password"
53 | terraform_password = "terraform-password"
54 | cookie_string = "cookie"
55 | federation_password = "federation-password"
56 | newrelic_key = "nri-infrastructure-key"
57 | }
58 |
59 | ### extracting extra infos from ecr repo pattern
60 | ecr_repo_dns = split("/", var.rabbit_image_url)[0]
61 | ecr_region = split(".", var.rabbit_image_url)[3]
62 | }
63 |
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
1 | resource "aws_launch_template" "rabbit_per_az" {
2 | for_each = local.az_with_context
3 | name = each.value.name
4 | image_id = data.aws_ami.rabbitmq.image_id
5 | instance_type = var.instance_type
6 |   vpc_security_group_ids = concat(tolist(var.additional_sg_instances_ids), [aws_security_group.main.id])
7 | user_data = base64encode(data.template_file.init[each.key].rendered)
8 | key_name = local.key_name
9 |
10 | metadata_options {
11 | http_endpoint = "enabled"
12 | http_tokens = "optional"
13 | }
14 |
15 | iam_instance_profile {
16 | name = var.instance_profile
17 | }
18 |
19 | tag_specifications {
20 | resource_type = "instance"
21 | tags = merge(var.default_tags, {
22 | Account = local.account_alias
23 | Name = each.value.name
24 | })
25 | }
26 |
27 | tag_specifications {
28 | resource_type = "volume"
29 | tags = merge(var.default_tags, {
30 | Account = local.account_alias
31 | Name = each.value.name
32 | })
33 | }
34 |
35 | tags = merge(var.default_tags, {
36 | App = var.name
37 | })
38 | }
39 |
40 | resource "aws_autoscaling_group" "rabbit_per_az" {
41 | for_each = local.az_with_context
42 | name = each.value.name
43 | max_size = 1
44 | min_size = 1
45 | health_check_grace_period = 180
46 | health_check_type = "EC2"
47 | desired_capacity = 1
48 | force_delete = true
49 |
50 | launch_template {
51 | id = aws_launch_template.rabbit_per_az[each.key].id
52 | version = "$Latest"
53 | }
54 |
55 | vpc_zone_identifier = each.value.subnet_ids
56 |
57 | lifecycle {
58 | ignore_changes = [load_balancers, target_group_arns]
59 | }
60 | }
61 |
62 | # Create Target Groups Attachments
63 | resource "aws_autoscaling_attachment" "this" {
64 | for_each = local.nlb_service_ports_with_azs
65 |   autoscaling_group_name = aws_autoscaling_group.rabbit_per_az[each.value.zone_id].id
66 | lb_target_group_arn = aws_lb_target_group.this[each.value.service_port].arn
67 | }
68 |
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/claytonsilva/terraform-aws-rabbitmq-ec2/0faf5cb931b1e6ed14fb6639cf08c753f22abd11/outputs.tf
--------------------------------------------------------------------------------
/packer/ansible/builder.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: default
3 | become: true
4 |
5 | tasks:
6 | - name: Setting Timezone
7 | shell: rm -f /etc/localtime; ln -s /usr/share/zoneinfo/Etc/GMT+3 /etc/localtime
8 |
9 |     - name: Installing base packages for the repo manager
10 | yum:
11 | name: "{{ packages }}"
12 | state: present
13 | lock_timeout: 120
14 | vars:
15 | packages:
16 | - yum-utils
17 | - shadow-utils
18 |
19 | - name: Config repo to install consul agent
20 | shell: |
21 | sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
22 |
23 | - name: Update Yum repos
24 | yum:
25 | name: "*"
26 | state: latest
27 |     - name: Installing packages
28 | yum:
29 | name: "{{ packages }}"
30 | state: present
31 | lock_timeout: 120
32 | vars:
33 | packages:
34 | - amazon-efs-utils
35 | - docker
36 | - dnsmasq
37 | - jq
38 | - consul
39 | - cronie
40 |
--------------------------------------------------------------------------------
/packer/env.pkrvars.hcl.example:
--------------------------------------------------------------------------------
1 | default_tags = {}
2 | vpc_id = "vpc-asdf"
3 | subnet_id = "subnet-asdf"
4 | region = "us-east-1"
5 |
--------------------------------------------------------------------------------
/packer/init.pkr.hcl:
--------------------------------------------------------------------------------
1 | packer {
2 | required_plugins {
3 | amazon = {
4 | source = "github.com/hashicorp/amazon"
5 | version = "~> 1"
6 | }
7 | ansible = {
8 | version = "~> 1"
9 | source = "github.com/hashicorp/ansible"
10 | }
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/packer/rabbit.pkr.hcl:
--------------------------------------------------------------------------------
1 | source "amazon-ebs" "amazon" {
2 | region = var.region
3 | source_ami = var.source_ami
4 | ami_virtualization_type = "hvm"
5 |   instance_type           = var.instance_type
6 | ssh_username = "ec2-user"
7 | ami_name = "rabbitmq-{{timestamp}}"
8 | launch_block_device_mappings {
9 | device_name = "/dev/xvda"
10 | volume_size = 20
11 | volume_type = "gp3"
12 | delete_on_termination = true
13 | throughput = 300
14 | iops = 3000
15 | }
16 | tags = var.default_tags
17 | run_tags = var.default_tags
18 | subnet_filter {
19 | filters = {
20 | "vpc-id" : var.vpc_id,
21 | "subnet-id" : var.subnet_id
22 | }
23 | }
24 | }
25 |
26 | build {
27 | sources = ["source.amazon-ebs.amazon"]
28 | provisioner "ansible" {
29 | # type = "ansible"
30 | playbook_file = "./ansible/builder.yml"
31 | ansible_env_vars = [
32 | "no_proxy=\"*\""
33 | ]
34 | extra_arguments = [
35 | "--scp-extra-args",
36 | "'-O'"
37 | ]
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/packer/variables.pkr.hcl:
--------------------------------------------------------------------------------
1 | variable "default_tags" {
2 | description = "tags used in application running"
3 | type = map(string)
4 | default = {}
5 | }
6 | variable "vpc_id" {
7 | description = "VPC ID"
8 | type = string
9 | }
10 | variable "subnet_id" {
11 | description = "Subnet ID"
12 | type = string
13 | }
14 | variable "region" {
15 | description = "region"
16 | type = string
17 | }
18 | variable "source_ami" {
19 |   description = "source AMI used as the base image for the build"
20 | type = string
21 | default = "ami-07ce5684ee3b5482c"
22 | }
23 | variable "instance_type" {
24 | description = "instance type"
25 | type = string
26 | default = "t4g.medium"
27 | }
28 |
--------------------------------------------------------------------------------
/security-group.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "lb" {
2 | name = "${var.name}-nlb-internal"
3 | description = "Security Group RabbitMQ Cluster (nlb internal)"
4 | vpc_id = local.vpc_id
5 |
6 | egress {
7 | cidr_blocks = ["0.0.0.0/0"]
8 | protocol = "-1"
9 | from_port = 0
10 | to_port = 0
11 | description = "Enable All Internet Traffic"
12 | }
13 |
14 | tags = merge(var.default_tags, {
15 | Account = local.account_alias
16 | Name = var.name
17 | })
18 |
19 | lifecycle {
20 | ignore_changes = [ingress]
21 | }
22 | }
23 |
24 |
25 | resource "aws_security_group" "main" {
26 | name = var.name
27 | description = "Security Group RabbitMQ Cluster"
28 | vpc_id = local.vpc_id
29 |
30 | egress {
31 | cidr_blocks = ["0.0.0.0/0"]
32 | protocol = "-1"
33 | from_port = 0
34 | to_port = 0
35 | description = "Enable All Internet Traffic"
36 | }
37 |
38 | tags = merge(var.default_tags, {
39 | Account = local.account_alias
40 | Name = var.name
41 | })
42 |
43 | lifecycle {
44 | ignore_changes = [ingress]
45 | }
46 | }
47 |
48 | ### internal comm between nodes from cluster
49 | resource "aws_security_group_rule" "enable_internal_comm" {
50 | type = "ingress"
51 | security_group_id = aws_security_group.main.id
52 | self = true
53 | protocol = "-1"
54 | from_port = 0
55 | to_port = 0
56 | description = "Allow inter node traffic"
57 | }
58 |
59 | ### internal comm between nlb and ec2
60 | resource "aws_security_group_rule" "enable_ports_to_internal_nlb" {
61 | for_each = local.rabbit_service_ports
62 | type = "ingress"
63 | security_group_id = aws_security_group.main.id
64 | source_security_group_id = aws_security_group.lb.id
65 | protocol = "tcp"
66 | from_port = each.value.port
67 | to_port = each.value.port
68 | description = "Allow traffic to rabbitmq - from inner VPC to internal nlb - ${each.key}"
69 | }
70 |
71 |
72 | ### internal comm between nlb and local vpc
73 | resource "aws_security_group_rule" "enable_ports_vpc" {
74 | for_each = local.nlb_listener_ports
75 | type = "ingress"
76 | security_group_id = aws_security_group.lb.id
77 | cidr_blocks = [local.vpc_cidr]
78 | protocol = "tcp"
79 | from_port = each.value.port
80 | to_port = each.value.port
81 | description = "Allow traffic to rabbitmq - from inner VPC - ${each.key}"
82 | }
83 |
84 | #### security group for efs mount
85 | resource "aws_security_group" "efs" {
86 | name = "${var.name}-efs"
87 | description = "Security Group EFS File system for rabbit cluster"
88 | vpc_id = local.vpc_id
89 |
90 | tags = merge(var.default_tags, {
91 | Account = local.account_alias
92 | Name = "${var.name}-efs"
93 | })
94 | }
95 |
96 |
97 | resource "aws_security_group_rule" "enable_comm_from_rabbit_cluster" {
98 | type = "ingress"
99 | security_group_id = aws_security_group.efs.id
100 | source_security_group_id = aws_security_group.main.id
101 |   protocol                 = "tcp"
102 |   from_port                = 2049 # efs mount port
103 |   to_port                  = 2049 # efs mount port
104 |   description              = "Allow NFS traffic from the rabbitmq cluster instances"
105 | }
106 |
--------------------------------------------------------------------------------
/terraform.tfvars.example:
--------------------------------------------------------------------------------
1 | name                          = "rabbitmq-cluster"
2 | ami_regex                     = "rabbitmq"
3 | instance_profile              = "rabbitmq-cluster"
4 | role_name                     = "rabbitmq-cluster"
5 | key_name                      = ""
6 | secret_arn                    = "arn:aws:secretsmanager:xxx:xxxx:secret:secret"
7 | secret_kms_arn                = "arn:aws:kms:xxx:xxxx:key/kms"
8 | region                        = "us-east-1"
9 | domain_name                   = "dev.acme.org"
10 | certificate_arn               = "arn:aws:acm:xxx:zzzz:certificate/acmexxxx"
11 | vpc_id                        = "vpc-xxxx"
12 | instances_subnet_ids          = ["subnet-xxxx", "subnet-yyyy", "subnet-zzzz"]
13 | alb_subnet_ids                = ["subnet-xxxx", "subnet-yyyy", "subnet-zzzz"]
14 | default_tags                  = {}
15 | rabbit_image_url              = "rabbitmq:3.11.19-management"
16 | rabbit_delayedmessage_version = "3.11.1"
17 |
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "name of the group resource"
3 | type = string
4 | default = "rabbitmq-cluster"
5 | }
6 | variable "region" {
7 | description = "region of the resources"
8 | type = string
9 | }
10 | variable "key_name" {
11 | description = "key used as fallback of the ssm access in instances"
12 | type = string
13 | default = ""
14 | }
15 | variable "instance_profile" {
16 | description = "instance profile used in ec2 resources"
17 | type = string
18 | }
19 | variable "instance_type" {
20 | description = "type of the instance of rabbitmq node"
21 | type = string
22 | default = "t4g.small"
23 | }
24 | variable "consul_domain" {
25 | description = "internal domain used in consul cluster"
26 | type = string
27 | default = "consul"
28 | }
29 | variable "cluster_name" {
30 | description = "name of the cluster"
31 | type = string
32 | default = "rabbitmq-cluster"
33 | }
34 | variable "ami_regex" {
35 | description = "regex to find the ami of the rabbitmq instances"
36 | type = string
37 | default = "rabbitmq"
38 | }
39 | variable "role_name" {
40 | description = "name of the role created by rabbitmq"
41 | type = string
42 | }
43 | variable "secret_arn" {
44 | description = "arn of the secret used in the rabbitmq solution"
45 | type = string
46 | }
47 | variable "secret_kms_arn" {
48 | description = "arn of the kms used in secret manager"
49 | type = string
50 | }
51 | variable "vpc_id" {
52 | description = "id of the vpc used in cluster"
53 | type = string
54 | }
55 | variable "default_tags" {
56 | description = "Default tags"
57 | type = map(string)
58 | }
59 | variable "certificate_arn" {
60 |   description = "ARN of the certificate on AWS ACM to attach to the load balancer"
61 | type = string
62 | }
63 | variable "domain_name" {
64 | description = "domain name used by the cluster (we will find this domain in route53)"
65 | type = string
66 | }
67 | variable "rabbit_image_url" {
68 |   description = "RabbitMQ image URL from Docker Hub or a custom registry"
69 |   type        = string
70 |   default     = "rabbitmq:3.13-management-alpine"
71 | }
72 | variable "rabbit_delayedmessage_version" {
73 | description = "version of the delayed message to be installed"
74 | type = string
75 | }
76 | variable "instances_subnet_ids" {
77 | type = set(string)
78 |   description = "set of subnet ids used by the RabbitMQ instances; to work correctly, provide exactly one subnet per AZ, for a total of 3 subnets"
79 | }
80 | variable "alb_subnet_ids" {
81 | type = set(string)
82 |   description = "subnets to be used by the network load balancer"
83 | }
84 | variable "default_ssl_policy" {
85 | type = string
86 | description = "default ssl policy used in SSL communications"
87 | default = "ELBSecurityPolicy-TLS13-1-2-2021-06"
88 | }
89 | variable "is_lb_internal" {
90 | type = bool
91 | description = "define if the load balancer is internal or external"
92 | default = true
93 | }
94 | variable "additional_sg_instances_ids" {
95 | # add check for only 5 security groups
96 |   description = "additional security group ids to attach directly to the instances in the Auto Scaling Group"
97 | type = set(string)
98 | default = []
99 | }
100 |
101 | variable "additional_sg_lb_ids" {
102 | # add check for only 5 security groups
103 |   description = "additional security group ids to attach directly to the load balancer"
104 | type = set(string)
105 | default = []
106 | }
107 |
--------------------------------------------------------------------------------