├── .gitignore ├── LICENSE ├── README.md ├── centralized_egress_dual_stack_full_mesh_trio_demo ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── blackhole_cidrs.tf ├── centralized_router_use1.tf ├── centralized_router_use2.tf ├── centralized_router_usw2.tf ├── full_mesh_trio.tf ├── providers.tf ├── security_group_rules.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf ├── vpc_peering.tf ├── vpcs_use1.tf ├── vpcs_use2.tf └── vpcs_usw2.tf ├── dual_stack_full_mesh_trio_demo ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── blackhole_cidrs.tf ├── centralized_router_use1.tf ├── centralized_router_use2.tf ├── centralized_router_usw2.tf ├── full_mesh_trio.tf ├── providers.tf ├── security_group_rules.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf ├── vpc_peering.tf ├── vpcs_use1.tf ├── vpcs_use2.tf └── vpcs_usw2.tf ├── dual_stack_networking_trifecta_demo ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── centralized_router.tf ├── instances.tf ├── providers.tf ├── scripts │ └── get_instance_info.sh ├── security_group_rules.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf └── vpcs.tf ├── full_mesh_trio_demo ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── blackhole_cidrs.tf ├── centralized_router_use1.tf ├── centralized_router_use2.tf ├── centralized_router_usw2.tf ├── full_mesh_trio.tf ├── providers.tf ├── security_group_rules.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf ├── vpc_peering.tf ├── vpcs_use1.tf ├── vpcs_use2.tf └── vpcs_usw2.tf ├── mega_mesh_demo ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── centralized_router_apne1.tf ├── centralized_router_apse1.tf ├── centralized_router_cac1.tf ├── centralized_router_euc1.tf ├── centralized_router_euw1.tf ├── centralized_router_sae1.tf ├── centralized_router_use1.tf ├── centralized_router_use2.tf ├── centralized_router_usw1.tf ├── centralized_router_usw2.tf ├── mega_mesh.tf ├── providers.tf ├── security_group_rules.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf ├── vpcs_apne1.tf ├── vpcs_apse1.tf ├── vpcs_cac1.tf ├── vpcs_euc1.tf ├── vpcs_euw1.tf ├── vpcs_sae1.tf ├── vpcs_use1.tf ├── vpcs_use2.tf ├── vpcs_usw1.tf └── vpcs_usw2.tf ├── networking_trifecta_demo ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── centralized_router.tf ├── instances.tf ├── providers.tf ├── scripts │ └── get_instance_info.sh ├── security_group_rules.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf └── vpcs.tf └── super_router_demo ├── .terraform-version ├── .terraform.lock.hcl ├── Readme.md ├── blackhole_cidrs.tf ├── centralized_routers_use1.tf ├── centralized_routers_usw2.tf ├── providers.tf ├── security_group_rules.tf ├── super_router_usw2_to_use1.tf ├── variables.tf ├── versions.tf ├── vpc_endpoints.tf ├── vpcs_use1.tf └── vpcs_usw2.tf /.gitignore: --------------------------------------------------------------------------------
1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Ignore transient lock info files created by terraform apply 27 | .terraform.tfstate.lock.info 28 | 29 | # Include override files you do wish to add to version control using negated pattern 30 | # !example_override.tf 31 | 32 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 33 | # example: *tfplan* 34 | 35 | # Ignore CLI configuration files 36 | .terraformrc 37 | terraform.rc 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Jude Quintana 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | ____. ________ ________ 3 | | |____ ___.__.\_____ \ \_____ \ ____ ____ 4 | | \__ \< | | / / \ \ / | \ / \_/ __ \ 5 | /\__| |/ __ \\___ |/ \_/.
\ / | \ | \ ___/ 6 | \________(____ / ____|\_____\ \_/_____\_______ /___| /\___ > 7 | \/\/ \__>_____/ \/ \/ \/ 8 | 9 | --=[ PrEsENtZ ]=-- 10 | 11 | --=[ AwS CLouD NeTWoRkiNg SuiTE 3000 ]=-- 12 | 13 | --=[ Build and scale cloud network topologies from base components in AWS and Terraform ]=-- 14 | 15 | --=[ #StayUp ]=-- 16 | ``` 17 | ## NEW Centralized Egress Dual Stack Full Mesh Trio! 18 | [Centralized Egress Dual Stack Full Mesh Trio Demo](https://github.com/JudeQuintana/terraform-main/tree/main/centralized_egress_dual_stack_full_mesh_trio_demo) 19 | - Compose a Centralized IPv4 Egress and Decentralized IPv6 Egress within a Dual Stack Full Mesh Topology across 3 regions using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.7) (at `v1.0.7`), [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.6) (at `v1.0.6`) and [Full Mesh Trio](https://github.com/JudeQuintana/terraform-aws-full-mesh-trio/tree/v1.0.1) (at `v1.0.1`) modules. 20 | - Includes VPC peering examples within a full mesh configuration, used for high-traffic workloads to save on cost, using the [VPC Peering Deluxe](https://github.com/JudeQuintana/terraform-aws-vpc-peering-deluxe/tree/v1.0.1) module (at `v1.0.1`). 21 | - Requires IPAM Pools for IPv4 and IPv6 CIDRs. 22 | - Validate connectivity with AWS Route Analyzer. 23 | 24 | ## Dual Stack Full Mesh Trio! 25 | [Dual Stack Full Mesh Trio Demo](https://github.com/JudeQuintana/terraform-main/tree/main/dual_stack_full_mesh_trio_demo) 26 | - Compose a dual stack Full Mesh Transit Gateway across 3 regions using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.7) (at `v1.0.7`), [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.6) (at `v1.0.6`) and [Full Mesh Trio](https://github.com/JudeQuintana/terraform-aws-full-mesh-trio/tree/v1.0.1) (at `v1.0.1`) modules. 27 | - Includes VPC peering examples within a full mesh configuration, used for high-traffic workloads to save on cost, using the [VPC Peering Deluxe](https://github.com/JudeQuintana/terraform-aws-vpc-peering-deluxe/tree/v1.0.1) module (at `v1.0.1`). 28 | - Requires IPAM Pools for IPv4 and IPv6 CIDRs. 29 | - Validate connectivity with AWS Route Analyzer. 30 | 31 | ## Dual Stack TNT Architecture! 32 | [Dual Stack Terraform Networking Trifecta Demo](https://github.com/JudeQuintana/terraform-main/tree/main/dual_stack_networking_trifecta_demo) 33 | - Compose a dual stack hub and spoke Transit Gateway using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.7) (at `v1.0.7`) and [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.6) (at `v1.0.6`) modules. 34 | - Requires IPAM Pools for IPv4 and IPv6 CIDRs. 35 | - Validate connectivity with EC2 instances. 36 | 37 | ## TNT Architecture! 38 | [Terraform Networking Trifecta Demo](https://github.com/JudeQuintana/terraform-main/tree/main/networking_trifecta_demo) 39 | - Compose a hub and spoke Transit Gateway using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.1) (at `v1.0.1`) and [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.1) (at `v1.0.1`) modules. 40 | - IPv4 only (no IPAM). 41 | - Validate connectivity with EC2 instances. 42 | 43 | ## Super Router!
44 | [Super Router Demo](https://github.com/JudeQuintana/terraform-main/tree/main/super_router_demo) 45 | - Compose a decentralized hub and spoke Transit Gateway using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.1) (at `v1.0.1`), [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.1) (at `v1.0.1`), and [Super Router](https://github.com/JudeQuintana/terraform-aws-super-router/tree/v1.0.0) (at `v1.0.0`) modules. 46 | - IPv4 only (no IPAM). 47 | - Validate connectivity with AWS Route Analyzer. 48 | 49 | ## Full Mesh Trio! 50 | [Full Mesh Trio Demo](https://github.com/JudeQuintana/terraform-main/tree/main/full_mesh_trio_demo) 51 | - Compose a Full Mesh Transit Gateway across 3 regions using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.1) (at `v1.0.1`), [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.1) (at `v1.0.1`) and [Full Mesh Trio](https://github.com/JudeQuintana/terraform-aws-full-mesh-trio/tree/v1.0.0) (at `v1.0.0`) modules. 52 | - Includes VPC peering examples within a full mesh configuration, used for high-traffic intra-region workloads to save on cost, using the [VPC Peering Deluxe](https://github.com/JudeQuintana/terraform-aws-vpc-peering-deluxe/tree/v1.0.0) module (at `v1.0.0`). 53 | - IPv4 only (no IPAM). 54 | - Validate connectivity with AWS Route Analyzer. 55 | 56 | ## Mega Mesh! 57 | [Mega Mesh Demo](https://github.com/JudeQuintana/terraform-main/tree/main/mega_mesh_demo) 58 | - Compose a Full Mesh Transit Gateway across 10 regions using [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-aws-tiered-vpc-ng/tree/v1.0.1) (at `v1.0.1`), [Centralized Router](https://github.com/JudeQuintana/terraform-aws-centralized-router/tree/v1.0.1) (at `v1.0.1`) and [Mega Mesh](https://github.com/JudeQuintana/terraform-aws-mega-mesh/tree/v1.0.0) (at `v1.0.0`) modules. 59 | - IPv4 only (no IPAM). 60 | - Validate connectivity with AWS Route Analyzer. 61 | 62 | --- 63 | ### Useful Tools 64 | - [IPv4 Subnet Calculator](https://visualsubnetcalc.com/#) 65 | - [IPv6 Subnet Calculator](https://subnettingpractice.com/ipv6-subnet-calculator.html) 66 | - `brew install ipcalc` 67 | 68 | --- 69 | ### Notes 70 | - Sometimes I'll blog about ideas at [jq1.io](https://jq1.io). 71 | - All modules are first developed in the [terraform-modules](https://github.com/JudeQuintana/terraform-modules) repo. 72 | - The most useful modules are [published](https://registry.terraform.io/namespaces/JudeQuintana) to the Public Terraform Registry. 73 | - All demos include an example of generating security group rules for intra-region and cross-region VPCs for each TGW configuration.
74 | - [Intra VPC Security Group Rule](https://github.com/JudeQuintana/terraform-aws-intra-vpc-security-group-rule) (IPv4 only) 75 | - [Super Intra VPC Security Group Rules](https://github.com/JudeQuintana/terraform-aws-super-intra-vpc-security-group-rules) (IPv4 only) 76 | - [Full Mesh Intra VPC Security Group Rules](https://github.com/JudeQuintana/terraform-aws-full-mesh-intra-vpc-security-group-rules) (IPv4 only) 77 | - [IPv6 Intra VPC Security Group Rule](https://github.com/JudeQuintana/terraform-aws-ipv6-intra-vpc-security-group-rule) (IPv6 only, for use with dual stack VPCs) 78 | - New [IPv6 Full Mesh Intra VPC Security Group Rules](https://github.com/JudeQuintana/terraform-aws-ipv6-full-mesh-intra-vpc-security-group-rules) (IPv6 only, for use with dual stack VPCs) 79 | - TODO: Mega Mesh Intra VPC Security Group Rules 80 | - S3 Gateway examples via VPC Endpoint are included. 81 | - The Centralized Router module is an implementation of both the `AWS Centralized Router` and `Centralized outbound routing to the internet` [concepts](https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-centralized-router.html), but without VPN Gateway or Direct Connect, only VPCs. 82 | - Available AZs (a, b, c, etc.) in a region are different per AWS account (i.e. your us-west-2a is not the same AZ as my us-west-2a), 83 | so it's possible you'll need to change the AZ letter for a VPC if the provider says it's not available for the region. 84 | - There is no overlapping CIDR detection intra-region or cross-region, so it's important that each VPC's network and subnet CIDRs are allocated correctly. 85 | - The AWS provider is updated from time to time, so you may need to run `terraform init -upgrade` if you've run init with a previous provider version. 86 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates.
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.95.0" 6 | constraints = ">= 4.20.0, >= 5.61.0, ~> 5.95" 7 | hashes = [ 8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=", 9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4", 10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743", 11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5", 12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7", 13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b", 14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6", 15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf", 16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82", 19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66", 20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1", 21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0", 22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d", 23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/blackhole_cidrs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | blackhole = { 3 | cidrs = ["172.16.65.0/24"] # app1 jenkins1 4 | ipv6_cidrs = ["2600:1f26:21:c400::/64"] # app1 test1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/centralized_router_use1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.6" 4 | 5 | providers = { 6 | aws = aws.use1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "mystique" 13 | amazon_side_asn = 64519 14 | vpcs = module.vpcs_use1 15 | blackhole = local.blackhole 16 | } 17 | } 18 | 19 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/centralized_router_use2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.6" 4 | 5 | providers = { 6 | aws = aws.use2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "magneto" 13 | amazon_side_asn = 64520 14 | vpcs = module.vpcs_use2 15 | blackhole = local.blackhole 16 | } 17 | } 18 | 19 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/centralized_router_usw2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_usw2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.6" 4 | 5 | providers = { 6 | aws = aws.usw2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name 
= "arch-angel" 13 | amazon_side_asn = 64521 14 | vpcs = module.vpcs_usw2 15 | blackhole = local.blackhole 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/full_mesh_trio.tf: -------------------------------------------------------------------------------- 1 | module "full_mesh_trio" { 2 | source = "JudeQuintana/full-mesh-trio/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws.one = aws.use1 7 | aws.two = aws.use2 8 | aws.three = aws.usw2 9 | } 10 | 11 | env_prefix = var.env_prefix 12 | full_mesh_trio = { 13 | one = { 14 | centralized_router = module.centralized_router_use1 15 | } 16 | two = { 17 | centralized_router = module.centralized_router_use2 18 | } 19 | three = { 20 | centralized_router = module.centralized_router_usw2 21 | } 22 | } 23 | } 24 | 25 | output "full_mesh_trio" { 26 | value = module.full_mesh_trio 27 | } 28 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/providers.tf: -------------------------------------------------------------------------------- 1 | # base region 2 | provider "aws" { 3 | region = "us-west-2" 4 | } 5 | 6 | provider "aws" { 7 | alias = "use1" 8 | region = "us-east-1" 9 | } 10 | 11 | provider "aws" { 12 | alias = "use2" 13 | region = "us-east-2" 14 | } 15 | 16 | provider "aws" { 17 | alias = "usw2" 18 | region = "us-west-2" 19 | } 20 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/security_group_rules.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # allow all ssh and ping communication between all VPCs within each region's intra-vpc security group 3 | security_group_rules = [ 4 | { 5 | label = "ssh" 6 | protocol = "tcp" 7 | from_port = 22 8 | to_port = 22 9 | }, 10 | { 11 | label = "ping" 12 | protocol = "icmp" 13 | from_port = 8 14 | to_port = 0 15 | } 16 | ] 17 | 18 | intra_vpc_security_group_rules = { for r in local.security_group_rules : r.label => r } 19 | } 20 | 21 | module "intra_vpc_security_group_rules_use1" { 22 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 23 | version = "1.0.1" 24 | 25 | providers = { 26 | aws = aws.use1 27 | } 28 | 29 | for_each = local.intra_vpc_security_group_rules 30 | 31 | env_prefix = var.env_prefix 32 | region_az_labels = var.region_az_labels 33 | intra_vpc_security_group_rule = { 34 | rule = each.value 35 | vpcs = module.vpcs_use1 36 | } 37 | } 38 | 39 | module "intra_vpc_security_group_rules_use2" { 40 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 41 | version = "1.0.1" 42 | 43 | providers = { 44 | aws = aws.use2 45 | } 46 | 47 | for_each = local.intra_vpc_security_group_rules 48 | 49 | env_prefix = var.env_prefix 50 | region_az_labels = var.region_az_labels 51 | intra_vpc_security_group_rule = { 52 | rule = each.value 53 | vpcs = module.vpcs_use2 54 | } 55 | } 56 | 57 | module "intra_vpc_security_group_rules_usw2" { 58 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 59 | version = "1.0.1" 60 | 61 | providers = { 62 | aws = aws.usw2 63 | } 64 | 65 | for_each = local.intra_vpc_security_group_rules 66 | 67 | env_prefix = var.env_prefix 68 | region_az_labels = var.region_az_labels 69 | intra_vpc_security_group_rule = { 70 | rule = each.value 71 | vpcs = module.vpcs_usw2 72 | } 73 | } 74 | 75 | ## allow all ssh and ping communication between all VPCs 
across regions in each intra-vpc security group 76 | module "full_mesh_intra_vpc_security_group_rules" { 77 | source = "JudeQuintana/full-mesh-intra-vpc-security-group-rules/aws" 78 | version = "1.0.1" 79 | 80 | providers = { 81 | aws.one = aws.use1 82 | aws.two = aws.use2 83 | aws.three = aws.usw2 84 | } 85 | 86 | env_prefix = var.env_prefix 87 | region_az_labels = var.region_az_labels 88 | full_mesh_intra_vpc_security_group_rules = { 89 | one = { 90 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use1 91 | } 92 | two = { 93 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use2 94 | } 95 | three = { 96 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_usw2 97 | } 98 | } 99 | } 100 | 101 | # IPv6 102 | locals { 103 | ipv6_security_group_rules = [ 104 | { 105 | label = "ssh6" 106 | protocol = "tcp" 107 | from_port = 22 108 | to_port = 22 109 | }, 110 | { 111 | label = "ping6" 112 | protocol = "icmpv6" 113 | from_port = -1 114 | to_port = -1 115 | } 116 | ] 117 | 118 | ipv6_intra_vpc_security_group_rules = { for r in local.ipv6_security_group_rules : r.label => r } 119 | } 120 | 121 | module "ipv6_intra_vpc_security_group_rules_use1" { 122 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 123 | version = "1.0.1" 124 | 125 | providers = { 126 | aws = aws.use1 127 | } 128 | 129 | for_each = local.ipv6_intra_vpc_security_group_rules 130 | 131 | env_prefix = var.env_prefix 132 | region_az_labels = var.region_az_labels 133 | ipv6_intra_vpc_security_group_rule = { 134 | rule = each.value 135 | vpcs = module.vpcs_use1 136 | } 137 | } 138 | 139 | module "ipv6_intra_vpc_security_group_rules_use2" { 140 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 141 | version = "1.0.1" 142 | 143 | providers = { 144 | aws = aws.use2 145 | } 146 | 147 | for_each = local.ipv6_intra_vpc_security_group_rules 148 | 149 | env_prefix = var.env_prefix 150 | region_az_labels = var.region_az_labels 151 | ipv6_intra_vpc_security_group_rule = { 152 | rule = each.value 153 | vpcs = module.vpcs_use2 154 | } 155 | } 156 | 157 | module "ipv6_intra_vpc_security_group_rules_usw2" { 158 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 159 | version = "1.0.1" 160 | 161 | providers = { 162 | aws = aws.usw2 163 | } 164 | 165 | for_each = local.ipv6_intra_vpc_security_group_rules 166 | 167 | env_prefix = var.env_prefix 168 | region_az_labels = var.region_az_labels 169 | ipv6_intra_vpc_security_group_rule = { 170 | rule = each.value 171 | vpcs = module.vpcs_usw2 172 | } 173 | } 174 | 175 | module "ipv6_full_mesh_intra_vpc_security_group_rules" { 176 | source = "JudeQuintana/ipv6-full-mesh-intra-vpc-security-group-rules/aws" 177 | version = "1.0.0" 178 | 179 | providers = { 180 | aws.one = aws.use1 181 | aws.two = aws.use2 182 | aws.three = aws.usw2 183 | } 184 | 185 | env_prefix = var.env_prefix 186 | region_az_labels = var.region_az_labels 187 | ipv6_full_mesh_intra_vpc_security_group_rules = { 188 | one = { 189 | ipv6_intra_vpc_security_group_rules = module.ipv6_intra_vpc_security_group_rules_use1 190 | } 191 | two = { 192 | ipv6_intra_vpc_security_group_rules = module.ipv6_intra_vpc_security_group_rules_use2 193 | } 194 | three = { 195 | ipv6_intra_vpc_security_group_rules = module.ipv6_intra_vpc_security_group_rules_usw2 196 | } 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "region_az_labels" { 8 | description = "Update this map with regions and AZs that will be in use for short name labeling" 9 | type = map(string) 10 | default = { 11 | us-west-2 = "usw2" 12 | us-west-2a = "usw2a" 13 | us-west-2b = "usw2b" 14 | us-west-2c = "usw2c" 15 | us-east-1 = "use1" 16 | us-east-1a = "use1a" 17 | us-east-1b = "use1b" 18 | us-east-1c = "use1c" 19 | us-east-2 = "use2" 20 | us-east-2a = "use2a" 21 | us-east-2b = "use2b" 22 | us-east-2c = "use2c" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpc_endpoint_service_name_fmt = "com.amazonaws.%s.s3" 4 | vpc_endpoint_type = "Gateway" 5 | 6 | vpcs_use1_with_private_route_table_ids = { for this in module.vpcs_use1 : this.name => this if length(this.private_route_table_ids) > 0 } 7 | vpcs_use2_with_private_route_table_ids = { for this in module.vpcs_use2 : this.name => this if length(this.private_route_table_ids) > 0 } 8 | vpcs_usw2_with_private_route_table_ids = { for this in module.vpcs_usw2 : this.name => this if length(this.private_route_table_ids) > 0 } 9 | } 10 | 11 | resource "aws_vpc_endpoint" "s3_use1" { 12 | provider = aws.use1 13 | 14 | for_each = local.vpcs_use1_with_private_route_table_ids 15 | 16 | vpc_id = each.value.id 17 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 18 | vpc_endpoint_type = local.vpc_endpoint_type 19 | route_table_ids = each.value.private_route_table_ids 20 | } 21 | 22 | resource "aws_vpc_endpoint" "s3_use2" { 23 | provider = aws.use2 24 | 25 | for_each = local.vpcs_use2_with_private_route_table_ids 26 | 27 | vpc_id = each.value.id 28 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 29 | vpc_endpoint_type = local.vpc_endpoint_type 30 | route_table_ids = each.value.private_route_table_ids 31 | } 32 | 33 | resource "aws_vpc_endpoint" "s3_usw2" { 34 | provider = aws.usw2 35 | 36 | for_each = local.vpcs_usw2_with_private_route_table_ids 37 | 38 | vpc_id = each.value.id 39 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 40 | vpc_endpoint_type = local.vpc_endpoint_type 41 | route_table_ids = each.value.private_route_table_ids 42 | } 43 | 44 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/vpc_peering.tf: -------------------------------------------------------------------------------- 1 | # cross region peering, only route specific subnets across peering connection 2 | # more cost effective vs using transit gateway for cross region traffic 3 | module "vpc_peering_deluxe_use1_general3_to_use2_app1" { 4 | source = "JudeQuintana/vpc-peering-deluxe/aws" 5 | version = 
"1.0.1" 6 | 7 | providers = { 8 | aws.local = aws.use1 9 | aws.peer = aws.use2 10 | } 11 | 12 | env_prefix = var.env_prefix 13 | vpc_peering_deluxe = { 14 | local = { 15 | vpc = lookup(module.vpcs_use1, "general3") 16 | only_route = { 17 | subnet_cidrs = ["192.168.65.0/24"] 18 | ipv6_subnet_cidrs = ["2600:1f28:3d:c400::/64"] 19 | } 20 | } 21 | peer = { 22 | vpc = lookup(module.vpcs_use2, "app1") 23 | only_route = { 24 | subnet_cidrs = ["172.16.68.0/28"] 25 | ipv6_subnet_cidrs = ["2600:1f26:21:c004::/64"] 26 | } 27 | } 28 | } 29 | } 30 | 31 | # intra region vpc peering, route all subnets across peering connection 32 | # more cost effective vs using transit gateway when cidr traffic is within same AZ. 33 | module "vpc_peering_deluxe_usw2_app2_to_usw2_general2" { 34 | source = "JudeQuintana/vpc-peering-deluxe/aws" 35 | version = "1.0.1" 36 | 37 | providers = { 38 | aws.local = aws.usw2 39 | aws.peer = aws.usw2 40 | } 41 | 42 | env_prefix = var.env_prefix 43 | vpc_peering_deluxe = { 44 | local = { 45 | vpc = lookup(module.vpcs_usw2, "app2") 46 | } 47 | peer = { 48 | vpc = lookup(module.vpcs_usw2, "general2") 49 | } 50 | } 51 | } 52 | 53 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/vpcs_use1.tf: -------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier) 2 | # main ipam in usw2 with a pool for use1 locale 3 | data "aws_vpc_ipam_pool" "ipv4_use1" { 4 | provider = aws.use1 5 | 6 | filter { 7 | name = "description" 8 | values = ["ipv4-test-use1"] 9 | } 10 | filter { 11 | name = "address-family" 12 | values = ["ipv4"] 13 | } 14 | } 15 | 16 | data "aws_vpc_ipam_pool" "ipv6_use1" { 17 | provider = aws.use1 18 | 19 | filter { 20 | name = "description" 21 | values = ["ipv6-test-use1"] 22 | } 23 | filter { 24 | name = "address-family" 25 | values = ["ipv6"] 26 | } 27 | } 28 | 29 | locals { 30 | ipv4_ipam_pool_use1 = data.aws_vpc_ipam_pool.ipv4_use1 31 | ipv6_ipam_pool_use1 = data.aws_vpc_ipam_pool.ipv6_use1 32 | 33 | tiered_vpcs_use1 = [ 34 | { 35 | name = "app3" 36 | ipv4 = { 37 | network_cidr = "10.0.64.0/18" 38 | secondary_cidrs = ["10.1.64.0/20"] 39 | ipam_pool = local.ipv4_ipam_pool_use1 40 | centralized_egress = { 41 | private = true 42 | } 43 | } 44 | ipv6 = { 45 | network_cidr = "2600:1f28:3d:c000::/56" 46 | ipam_pool = local.ipv6_ipam_pool_use1 47 | } 48 | azs = { 49 | a = { 50 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 51 | private_subnets = [ 52 | { name = "cluster1", cidr = "10.0.64.0/24", ipv6_cidr = "2600:1f28:3d:c000::/64" } 53 | ] 54 | # Enable a NAT Gateway for all private subnets in the same AZ 55 | # by adding the "natgw = true" attribute to any public subnet 56 | public_subnets = [ 57 | { name = "random1", cidr = "10.0.66.0/28", ipv6_cidr = "2600:1f28:3d:c001::/64", special = true }, 58 | { name = "haproxy1", cidr = "10.0.67.64/26", ipv6_cidr = "2600:1f28:3d:c002::/64" } 59 | ] 60 | } 61 | b = { 62 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 63 | private_subnets = [ 64 | { name = "cluster2", cidr = "10.0.70.0/24", ipv6_cidr = "2600:1f28:3d:c003::/64" } 65 | ] 66 | public_subnets = [ 67 | { name = "random2", cidr = "10.0.72.0/28", ipv6_cidr = "2600:1f28:3d:c004::/64", special = true }, 68 | { name = "haproxy2", cidr = "10.0.73.64/26", ipv6_cidr = "2600:1f28:3d:c005::/64" }, 69 | #secondary subnet 70 | { name = "other1", cidr = "10.1.64.0/24", ipv6_cidr = "2600:1f28:3d:c006::/64" } 
71 | ] 72 | } 73 | } 74 | }, 75 | { 76 | name = "infra3" 77 | ipv4 = { 78 | network_cidr = "172.18.0.0/18" 79 | secondary_cidrs = ["172.18.64.0/20"] 80 | ipam_pool = local.ipv4_ipam_pool_use1 81 | centralized_egress = { 82 | private = true 83 | } 84 | } 85 | ipv6 = { 86 | network_cidr = "2600:1f28:3d:c700::/56" 87 | secondary_cidrs = ["2600:1f28:3d:c800::/56"] 88 | ipam_pool = local.ipv6_ipam_pool_use1 89 | } 90 | azs = { 91 | a = { 92 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 93 | private_subnets = [ 94 | { name = "haproxy5", cidr = "172.18.0.0/24", ipv6_cidr = "2600:1f28:3d:c700::/64" } 95 | ] 96 | public_subnets = [ 97 | { name = "edge3", cidr = "172.18.3.0/24", ipv6_cidr = "2600:1f28:3d:c703::/64" }, 98 | { name = "edge4", cidr = "172.18.4.0/24", ipv6_cidr = "2600:1f28:3d:c705::/64", special = true } 99 | ] 100 | isolated_subnets = [ 101 | # ipv6 secondary cidr 102 | { name = "db11", cidr = "172.18.9.0/24", ipv6_cidr = "2600:1f28:3d:c880::/60" } 103 | ] 104 | } 105 | b = { 106 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 107 | private_subnets = [ 108 | { name = "util4", cidr = "172.18.12.0/24", ipv6_cidr = "2600:1f28:3d:c708::/64" }, 109 | { name = "util5", cidr = "172.18.15.0/24", ipv6_cidr = "2600:1f28:3d:c70a::/64", special = true } 110 | ] 111 | isolated_subnets = [ 112 | # secondary cidr 113 | { name = "db12", cidr = "172.18.67.0/24", ipv6_cidr = "2600:1f28:3d:c70c::/64" } 114 | ] 115 | } 116 | } 117 | }, 118 | { 119 | name = "general3" 120 | ipv4 = { 121 | network_cidr = "192.168.64.0/18" 122 | secondary_cidrs = ["192.168.128.0/20"] 123 | ipam_pool = local.ipv4_ipam_pool_use1 124 | centralized_egress = { 125 | central = true 126 | } 127 | } 128 | ipv6 = { 129 | network_cidr = "2600:1f28:3d:c400::/56" 130 | ipam_pool = local.ipv6_ipam_pool_use1 131 | } 132 | azs = { 133 | a = { 134 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 135 | private_subnets = [ 136 | { name = "cluster4", cidr = "192.168.65.0/24", ipv6_cidr = "2600:1f28:3d:c400::/64", special = true } 137 | ] 138 | public_subnets = [ 139 | { name = "random2", cidr = "192.168.67.0/28", ipv6_cidr = "2600:1f28:3d:c401::/64", }, 140 | { name = "haproxy1", cidr = "192.168.68.64/26", ipv6_cidr = "2600:1f28:3d:c402::/64", natgw = true } 141 | ] 142 | } 143 | b = { 144 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 145 | private_subnets = [ 146 | { name = "experiment1", cidr = "192.168.70.0/24", ipv6_cidr = "2600:1f28:3d:c403::/64", special = true } 147 | ] 148 | public_subnets = [ 149 | { name = "random3", cidr = "192.168.71.0/28", ipv6_cidr = "2600:1f28:3d:c404::/64", natgw = true }, 150 | { name = "haproxy3", cidr = "192.168.72.64/26", ipv6_cidr = "2600:1f28:3d:c405::/64" }, 151 | # secondary subnet 152 | { name = "haproxy2", cidr = "192.168.128.0/24", ipv6_cidr = "2600:1f28:3d:c406::/64" } 153 | ] 154 | } 155 | } 156 | } 157 | ] 158 | } 159 | 160 | module "vpcs_use1" { 161 | source = "JudeQuintana/tiered-vpc-ng/aws" 162 | version = "1.0.7" 163 | 164 | providers = { 165 | aws = aws.use1 166 | } 167 | 168 | for_each = { for t in local.tiered_vpcs_use1 : t.name => t } 169 | 170 | env_prefix = var.env_prefix 171 | region_az_labels = var.region_az_labels 172 | tiered_vpc = each.value 173 | } 174 | 175 | output "vpcs_use1_natgw_eips_per_az" { 176 | value = { for v in module.vpcs_use1 : v.name => v.public_natgw_az_to_eip } 177 | } 178 | 179 | -------------------------------------------------------------------------------- 
/centralized_egress_dual_stack_full_mesh_trio_demo/vpcs_use2.tf: -------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier) 2 | # main ipam in usw2 with a pool for use2 locale 3 | data "aws_vpc_ipam_pool" "ipv4_use2" { 4 | provider = aws.use2 5 | 6 | filter { 7 | name = "description" 8 | values = ["ipv4-test-use2"] 9 | } 10 | filter { 11 | name = "address-family" 12 | values = ["ipv4"] 13 | } 14 | } 15 | 16 | data "aws_vpc_ipam_pool" "ipv6_use2" { 17 | provider = aws.use2 18 | 19 | filter { 20 | name = "description" 21 | values = ["ipv6-test-use2"] 22 | } 23 | filter { 24 | name = "address-family" 25 | values = ["ipv6"] 26 | } 27 | } 28 | 29 | locals { 30 | ipv4_ipam_pool_use2 = data.aws_vpc_ipam_pool.ipv4_use2 31 | ipv6_ipam_pool_use2 = data.aws_vpc_ipam_pool.ipv6_use2 32 | 33 | tiered_vpcs_use2 = [ 34 | { 35 | name = "app1" 36 | ipv4 = { 37 | network_cidr = "172.16.64.0/18" 38 | secondary_cidrs = ["172.16.192.0/20"] 39 | ipam_pool = local.ipv4_ipam_pool_use2 40 | centralized_egress = { 41 | private = true 42 | } 43 | } 44 | ipv6 = { 45 | network_cidr = "2600:1f26:21:c000::/56" 46 | secondary_cidrs = ["2600:1f26:21:c400::/56"] 47 | ipam_pool = local.ipv6_ipam_pool_use2 48 | } 49 | azs = { 50 | a = { 51 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 52 | private_subnets = [ 53 | { name = "jenkins1", cidr = "172.16.65.0/24", ipv6_cidr = "2600:1f26:21:c001::/64" } 54 | ] 55 | public_subnets = [ 56 | { name = "other", cidr = "172.16.68.0/28", ipv6_cidr = "2600:1f26:21:c002::/64", special = true }, 57 | { name = "other2", cidr = "172.16.76.16/28", ipv6_cidr = "2600:1f26:21:c003::/64" }, 58 | # ipv6 secondary cidr 59 | { name = "test1", cidr = "172.16.77.32/28", ipv6_cidr = "2600:1f26:21:c400::/60" } 60 | ] 61 | } 62 | c = { 63 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 64 | private_subnets = [ 65 | # secondary cidr 66 | { name = "experiment1", cidr = "172.16.192.0/24", ipv6_cidr = "2600:1f26:21:c004::/64", special = true } 67 | ] 68 | public_subnets = [ 69 | #ipv4 secondary cidr and ipv6 secondary cidr 70 | { name = "test2", cidr = "172.16.194.0/24", ipv6_cidr = "2600:1f26:21:c410::/60" } 71 | ] 72 | } 73 | } 74 | }, 75 | { 76 | name = "infra1" 77 | ipv4 = { 78 | network_cidr = "192.168.192.0/18" 79 | secondary_cidrs = ["192.168.160.0/20"] 80 | ipam_pool = local.ipv4_ipam_pool_use2 81 | centralized_egress = { 82 | private = true 83 | } 84 | } 85 | ipv6 = { 86 | network_cidr = "2600:1f26:21:c900::/56" 87 | ipam_pool = local.ipv6_ipam_pool_use2 88 | } 89 | azs = { 90 | a = { 91 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 92 | private_subnets = [ 93 | { name = "nginx8", cidr = "192.168.192.0/24", ipv6_cidr = "2600:1f26:21:c900::/64" } 94 | ] 95 | public_subnets = [ 96 | { name = "edge6", cidr = "192.168.195.0/24", ipv6_cidr = "2600:1f26:21:c901::/64" }, 97 | { name = "edge7", cidr = "192.168.196.0/24", ipv6_cidr = "2600:1f26:21:c902::/64", special = true } 98 | ] 99 | isolated_subnets = [ 100 | { name = "db8", cidr = "192.168.200.0/24", ipv6_cidr = "2600:1f26:21:c909::/64" } 101 | ] 102 | } 103 | c = { 104 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 105 | private_subnets = [ 106 | { name = "util6", cidr = "192.168.202.0/24", ipv6_cidr = "2600:1f26:21:c90d::/64" }, 107 | { name = "util7", cidr = "192.168.204.0/24", ipv6_cidr = "2600:1f26:21:c90e::/64", special = true } 108 | ] 109 | isolated_subnets = [ 110 | # secondary cidr 111 | { 
name = "db9", cidr = "192.168.161.0/24", ipv6_cidr = "2600:1f26:21:c911::/64" } 112 | ] 113 | } 114 | } 115 | }, 116 | { 117 | name = "general1" 118 | ipv4 = { 119 | network_cidr = "172.16.128.0/18" 120 | secondary_cidrs = ["172.16.208.0/20"] 121 | ipam_pool = local.ipv4_ipam_pool_use2 122 | centralized_egress = { 123 | central = true 124 | } 125 | } 126 | ipv6 = { 127 | network_cidr = "2600:1f26:21:c100::/56" 128 | ipam_pool = local.ipv6_ipam_pool_use2 129 | } 130 | azs = { 131 | a = { 132 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 133 | private_subnets = [ 134 | { name = "artifacts2", cidr = "172.16.129.0/24", ipv6_cidr = "2600:1f26:21:c101::/64", special = true } 135 | ] 136 | public_subnets = [ 137 | { name = "random1", cidr = "172.16.131.0/28", ipv6_cidr = "2600:1f26:21:c102::/64", natgw = true } 138 | ] 139 | } 140 | c = { 141 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 142 | private_subnets = [ 143 | { name = "jenkins2", cidr = "172.16.132.0/24", ipv6_cidr = "2600:1f26:21:c103::/64", special = true } 144 | ] 145 | public_subnets = [ 146 | { name = "random2", cidr = "172.16.133.0/28", ipv6_cidr = "2600:1f26:21:c104::/64", natgw = true }, 147 | # secondary cidr 148 | { name = "random3", cidr = "172.16.208.0/24", ipv6_cidr = "2600:1f26:21:c105::/64" } 149 | ] 150 | } 151 | } 152 | } 153 | ] 154 | } 155 | 156 | module "vpcs_use2" { 157 | source = "JudeQuintana/tiered-vpc-ng/aws" 158 | version = "1.0.7" 159 | 160 | providers = { 161 | aws = aws.use2 162 | } 163 | 164 | for_each = { for t in local.tiered_vpcs_use2 : t.name => t } 165 | 166 | env_prefix = var.env_prefix 167 | region_az_labels = var.region_az_labels 168 | tiered_vpc = each.value 169 | } 170 | 171 | output "vpcs_use2_natgw_eips_per_az" { 172 | value = { for v in module.vpcs_use2 : v.name => v.public_natgw_az_to_eip } 173 | } 174 | 175 | -------------------------------------------------------------------------------- /centralized_egress_dual_stack_full_mesh_trio_demo/vpcs_usw2.tf: -------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier) 2 | # main ipam in usw2 with a pool for usw2 locale 3 | data "aws_vpc_ipam_pool" "ipv4_usw2" { 4 | provider = aws.usw2 5 | 6 | filter { 7 | name = "description" 8 | values = ["ipv4-test-usw2"] 9 | } 10 | filter { 11 | name = "address-family" 12 | values = ["ipv4"] 13 | } 14 | } 15 | 16 | data "aws_vpc_ipam_pool" "ipv6_usw2" { 17 | provider = aws.usw2 18 | 19 | filter { 20 | name = "description" 21 | values = ["ipv6-test-usw2"] 22 | } 23 | filter { 24 | name = "address-family" 25 | values = ["ipv6"] 26 | } 27 | } 28 | 29 | locals { 30 | ipv4_ipam_pool_usw2 = data.aws_vpc_ipam_pool.ipv4_usw2 31 | ipv6_ipam_pool_usw2 = data.aws_vpc_ipam_pool.ipv6_usw2 32 | } 33 | 34 | # ipv4 and ipv6 must use an ipam pool 35 | # can start with ipv4 only and then add ipv6 later if needed. 36 | # vpcs with an ipv4 network cidr /18 provides /20 subnet for each AZ. 
37 | locals { 38 | tiered_vpcs_usw2 = [ 39 | { 40 | name = "app2" 41 | ipv4 = { 42 | network_cidr = "10.0.0.0/18" 43 | secondary_cidrs = ["10.1.0.0/20"] 44 | ipam_pool = local.ipv4_ipam_pool_usw2 45 | centralized_egress = { 46 | private = true 47 | } 48 | } 49 | ipv6 = { 50 | network_cidr = "2600:1f24:66:c000::/56" 51 | ipam_pool = local.ipv6_ipam_pool_usw2 52 | } 53 | azs = { 54 | a = { 55 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 56 | private_subnets = [ 57 | { name = "another", cidr = "10.0.9.0/24", ipv6_cidr = "2600:1f24:66:c008::/64" } 58 | ] 59 | public_subnets = [ 60 | { name = "random1", cidr = "10.0.3.0/28", ipv6_cidr = "2600:1f24:66:c000::/64" }, 61 | { name = "haproxy1", cidr = "10.0.4.0/26", ipv6_cidr = "2600:1f24:66:c001::/64" }, 62 | { name = "other", cidr = "10.0.10.0/28", ipv6_cidr = "2600:1f24:66:c002::/64", special = true } 63 | ] 64 | isolated_subnets = [ 65 | # secondary cidr 66 | { name = "db1", cidr = "10.1.13.0/24", ipv6_cidr = "2600:1f24:66:c050::/60" } 67 | ] 68 | } 69 | b = { 70 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 71 | private_subnets = [ 72 | { name = "cluster2", cidr = "10.0.16.0/24", ipv6_cidr = "2600:1f24:66:c006::/64" }, 73 | { name = "random2", cidr = "10.0.17.0/24", ipv6_cidr = "2600:1f24:66:c007::/64", special = true } 74 | ] 75 | isolated_subnets = [ 76 | # secondary cidr 77 | { name = "db2", cidr = "10.1.0.0/24", ipv6_cidr = "2600:1f24:66:c009::/64" } 78 | ] 79 | } 80 | } 81 | }, 82 | { 83 | name = "infra2" 84 | ipv4 = { 85 | network_cidr = "10.2.0.0/18" 86 | secondary_cidrs = ["10.2.64.0/20"] 87 | ipam_pool = local.ipv4_ipam_pool_usw2 88 | centralized_egress = { 89 | private = true 90 | } 91 | } 92 | ipv6 = { 93 | network_cidr = "2600:1f24:66:ca00::/56" 94 | secondary_cidrs = ["2600:1f24:66:cd00::/56"] 95 | ipam_pool = local.ipv6_ipam_pool_usw2 96 | } 97 | azs = { 98 | a = { 99 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 100 | private_subnets = [ 101 | { name = "util4", cidr = "10.2.0.0/24", ipv6_cidr = "2600:1f24:66:ca00::/64" } 102 | ] 103 | public_subnets = [ 104 | { name = "edge1", cidr = "10.2.6.0/24", ipv6_cidr = "2600:1f24:66:ca01::/64" }, 105 | { name = "edge2", cidr = "10.2.7.0/24", ipv6_cidr = "2600:1f24:66:ca02::/64", special = true } 106 | ] 107 | isolated_subnets = [ 108 | # ipv6 secondary cidr 109 | { name = "db4", cidr = "10.2.10.0/28", ipv6_cidr = "2600:1f24:66:cd10::/60" } 110 | ] 111 | } 112 | b = { 113 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 114 | private_subnets = [ 115 | { name = "ngnix1", cidr = "10.2.9.0/24", ipv6_cidr = "2600:1f24:66:ca1b::/64" }, 116 | { name = "nginx2", cidr = "10.2.11.0/24", ipv6_cidr = "2600:1f24:66:ca1c::/64", special = true } 117 | ] 118 | isolated_subnets = [ 119 | # secondary cidr 120 | { name = "db6", cidr = "10.2.65.0/24", ipv6_cidr = "2600:1f24:66:ca1d::/64" } 121 | ] 122 | } 123 | } 124 | }, 125 | { 126 | name = "general2" 127 | ipv4 = { 128 | network_cidr = "192.168.0.0/18" 129 | secondary_cidrs = ["192.168.144.0/20"] 130 | ipam_pool = local.ipv4_ipam_pool_usw2 131 | centralized_egress = { 132 | central = true 133 | } 134 | } 135 | ipv6 = { 136 | network_cidr = "2600:1f24:66:c100::/56" 137 | ipam_pool = local.ipv6_ipam_pool_usw2 138 | } 139 | azs = { 140 | a = { 141 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 142 | private_subnets = [ 143 | { name = "util2", cidr = "192.168.10.0/24", ipv6_cidr = "2600:1f24:66:c100::/64", special = true }, 144 | { name = "util1", 
cidr = "192.168.11.0/24", ipv6_cidr = "2600:1f24:66:c101::/64" } 145 | ] 146 | public_subnets = [ 147 | { name = "other2", cidr = "192.168.14.0/28", ipv6_cidr = "2600:1f24:66:c108::/64", natgw = true } 148 | ] 149 | } 150 | b = { 151 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 152 | private_subnets = [ 153 | { name = "cluster5", cidr = "192.168.13.0/24", ipv6_cidr = "2600:1f24:66:c102::/64", special = true } 154 | ] 155 | public_subnets = [ 156 | # secondary subnet 157 | { name = "other3", cidr = "192.168.144.0/24", ipv6_cidr = "2600:1f24:66:c109::/64", natgw = true } 158 | ] 159 | } 160 | } 161 | } 162 | ] 163 | } 164 | 165 | module "vpcs_usw2" { 166 | source = "JudeQuintana/tiered-vpc-ng/aws" 167 | version = "1.0.7" 168 | 169 | providers = { 170 | aws = aws.usw2 171 | } 172 | 173 | for_each = { for t in local.tiered_vpcs_usw2 : t.name => t } 174 | 175 | env_prefix = var.env_prefix 176 | region_az_labels = var.region_az_labels 177 | tiered_vpc = each.value 178 | } 179 | 180 | output "vpcs_usw2_natgw_eips_per_az" { 181 | value = { for v in module.vpcs_usw2 : v.name => v.public_natgw_az_to_eip } 182 | } 183 | 184 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.95.0" 6 | constraints = ">= 4.20.0, >= 5.61.0, ~> 5.95" 7 | hashes = [ 8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=", 9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4", 10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743", 11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5", 12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7", 13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b", 14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6", 15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf", 16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82", 19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66", 20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1", 21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0", 22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d", 23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/README.md: -------------------------------------------------------------------------------- 1 | # Dual Stack Full Mesh Trio Demo 2 | - Dual stack VPCs with IPAM and Full Mesh Transit Gateway across 3 regions 3 | - This is the dual stack version of the (IPv4 only) [Full Mesh Trio demo](https://github.com/JudeQuintana/terraform-main/tree/main/full_mesh_trio_demo). 
4 | - Both IPv4 and IPv6 secondary CIDRs are supported. 5 | - Start with IPv4 only and add IPv6 at a later time, or start with both. 6 | - The demo does not work as-is because these Amazon-owned IPv6 CIDRs have been allocated to my AWS account. 7 | - You'll need to configure your own IPv4 and IPv6 CIDR pools/subpools; IPAM instructions are below. 8 | 9 | ### VPC CIDRs 10 | - `us-east-2` 11 | - App1 VPC Tier: 12 | - IPv4: `172.16.64.0/18` 13 | - IPv4 Secondaries: `172.16.192.0/20` 14 | - IPv6: `2600:1f26:21:c000::/56` 15 | - IPv6 Secondaries: `2600:1f26:21:c400::/56` 16 | - General1 VPC Tier: 17 | - IPv4: `172.16.128.0/18` 18 | - IPv4 Secondaries: `172.16.208.0/20` 19 | - IPv6: `2600:1f26:21:c100::/56` 20 | - No IPv6 Secondaries 21 | 22 | - `us-west-2` 23 | - App2 VPC Tier: 24 | - IPv4: `10.0.0.0/18` 25 | - IPv4 Secondaries: `10.1.0.0/20` 26 | - IPv6: `2600:1f24:66:c000::/56` 27 | - No IPv6 Secondaries 28 | - General2 VPC Tier: 29 | - IPv4: `192.168.0.0/18` 30 | - IPv4 Secondaries: `192.168.144.0/20` 31 | - IPv6: `2600:1f24:66:c100::/56` 32 | - No IPv6 Secondaries 33 | 34 | - `us-east-1` 35 | - App3 VPC Tier: 36 | - IPv4: `10.0.64.0/18` 37 | - IPv4 Secondaries: `10.1.64.0/20` 38 | - IPv6: `2600:1f28:3d:c000::/56` 39 | - No IPv6 Secondaries 40 | - General3 VPC Tier: 41 | - IPv4: `192.168.64.0/18` 42 | - IPv4 Secondaries: `192.168.128.0/20` 43 | - IPv6: `2600:1f28:3d:c400::/56` 44 | - No IPv6 Secondaries 45 | 46 | VPCs with an IPv4 network CIDR of /18 provide a /20 subnet for each AZ (up to 4 AZs). 47 | 48 | The resulting architecture is an IPv4-only or a dual stack full mesh topology across 3 regions: 49 | ![dual-stack-full-mesh-trio](https://jq1-io.s3.us-east-1.amazonaws.com/dual-stack/dual-stack-full-mesh-trio.png) 50 | 51 | ### IPAM Configuration 52 | - There are many ways to configure IPAM, so I manually created IPAM pools (advanced tier) in the AWS UI. 53 | - The demo does not work as-is because these Amazon-owned IPv6 CIDRs have been allocated to my AWS account. 54 | - You'll need to configure your own IPv4 and IPv6 CIDR pools/subpools. 55 | - Advanced Tier IPAM in `us-east-2`, `us-west-2`, and `us-east-1` operating regions. 56 | - In this demo, IPAM pools for all locales are managed in the `us-west-2` region via the AWS Console UI. 57 | - No IPv4 regional pools at the moment. 58 | - IPv6 subpools need an IPv6 regional pool with `/52` to be able to provision `/56` per locale.
59 | - `us-east-2` (ipam locale) 60 | - IPv4 Pool (private scope) 61 | - Description: `ipv4-test-use2` 62 | - Provisioned CIDRs: 63 | - `172.16.64.0/18` 64 | - `172.16.128.0/18` 65 | - `172.16.192.0/20` 66 | - `172.16.208.0/20` 67 | - IPv6 regional pool (public scope) 68 | - `2600:1f26:21:c000::/52` 69 | - IPv6 subpool (public scope) 70 | - Description: `ipv6-test-use2` 71 | - Provisioned CIDRs: 72 | - `2600:1f26:21:c000::/56` 73 | - `2600:1f26:21:c100::/56` 74 | - `2600:1f26:21:c400::/56` 75 | 76 | - `us-west-2` (ipam locale) 77 | - IPv4 Pool (private scope) 78 | - Description: `ipv4-test-usw2` 79 | - Provisioned CIDRs: 80 | - `10.0.0.0/18` 81 | - `10.1.0.0/20` 82 | - `192.168.0.0/18` 83 | - `192.168.144.0/20` 84 | - IPv6 regional pool (public scope) 85 | - `2600:1f24:66:c000::/52` 86 | - IPv6 subpool (public scope) 87 | - Description: `ipv6-test-usw2` 88 | - Provisioned CIDRs: 89 | - `2600:1f24:66:c000::/56` 90 | - `2600:1f24:66:c100::/56` 91 | 92 | - `us-east-1` (ipam locale) 93 | - IPv4 Pool (private scope) 94 | - Description: `ipv4-test-use1` 95 | - Provisioned CIDRs: 96 | - `10.0.64.0/18` 97 | - `10.1.64.0/20` 98 | - `192.168.64.0/18` 99 | - `192.168.128.0/20` 100 | - IPv6 regional pool (public scope) 101 | - `2600:1f28:3d:c000::/52` 102 | - IPv6 subpool (public scope) 103 | - Description: `ipv6-test-use1` 104 | - Provisioned CIDRs: 105 | - `2600:1f28:3d:c000::/56` 106 | - `2600:1f28:3d:c400::/56` 107 | 108 | ### Build Dual Stack Full Mesh Trio 109 | 1. It begins: 110 | - `terraform init` 111 | 112 | 2. Apply VPCs (must exist before Centralized Routers): 113 | - `terraform apply -target module.vpcs_use1 -target module.vpcs_use2 -target module.vpcs_usw2` 114 | 115 | 3. Apply S3 Gateways, Full Mesh Intra VPC Security Group Rules, and IPv6 Full Mesh Intra VPC Security Group Rules (this will auto-apply their dependent modules, the Intra Security Group Rules and IPv6 Intra Security Group Rules for each region) to allow EC2 access across VPC regions (i.e. ssh and ping) for VPCs in a TGW Full Mesh configuration. 116 | - `terraform apply -target aws_vpc_endpoint.s3_use1 -target aws_vpc_endpoint.s3_use2 -target aws_vpc_endpoint.s3_usw2 -target module.full_mesh_intra_vpc_security_group_rules -target module.ipv6_full_mesh_intra_vpc_security_group_rules` 117 | 118 | 4. Apply VPC Peering Deluxe and Centralized Routers: 119 | - `terraform apply -target module.vpc_peering_deluxe_usw2_app2_to_usw2_general2 -target module.vpc_peering_deluxe_use1_general3_to_use2_app1 -target module.centralized_router_use1 -target module.centralized_router_use2 -target module.centralized_router_usw2` 120 | 121 | 5.
Apply Full Mesh Trio: 122 | - `terraform apply -target module.full_mesh_trio` 123 | 124 | Note: combine steps 3 through 5 with: `terraform apply` 125 | 126 | ### Routing and peering validation with AWS Route Analyzer 127 | - Go to [AWS Network Manager](https://us-west-2.console.aws.amazon.com/networkmanager/home?region=us-east-1#/networks) (free to use) 128 | - Create global network -> `next` 129 | - UNCHECK `Add core network in your global network` or you will be billed extra -> `next` 130 | - Select new global network -> go to `Transit Gateways` -> `Register 131 | Transit Gateway` -> Select TGWs -> `Register Transit Gateway` -> wait until all states say `Available` 132 | - Go to `Transit gateway network` -> `Route Analyzer` 133 | 134 | - IPv4: 135 | - Cross-Region Test 1 (use1a to use2c) 136 | - Source: 137 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1` 138 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general3-use1 <-> TEST-centralized-router-mystique-use1` (VPC) 139 | - IP Address: `192.168.68.70` (`haproxy1` public subnet) 140 | - Destination: 141 | - Transit Gateway: Choose `TEST-centralized-router-magneto-use2` 142 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general1-use2 <-> TEST-centralized-router-magneto-use2` (VPC) 143 | - IP Address: `172.16.132.6` (`jenkins2` private subnet) 144 | - Select `Run Route Analysis` 145 | - Forward and Return Paths should both have a `Connected` status. 146 | - Cross-Region Test 2 (use2a to usw2a) 147 | - Source: 148 | - Transit Gateway: Choose `TEST-centralized-router-magneto-use2` 149 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app1-use2 <-> TEST-centralized-router-magneto-use2` (VPC) 150 | - IP Address: `172.16.76.21` (`other2` public subnet) 151 | - Destination: 152 | - Transit Gateway: Choose `TEST-centralized-router-arch-angel-usw2` 153 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general2-usw2 <-> TEST-centralized-router-arch-angel-usw2` (VPC) 154 | - IP Address: `192.168.11.11` (`util1` private subnet) 155 | - Select `Run Route Analysis` 156 | - Forward and Return Paths should both have a `Connected` status. 157 | - Cross-Region Test 3 (usw2b to use1b) 158 | - Source: 159 | - Transit Gateway: Choose `TEST-centralized-router-arch-angel-usw2` 160 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app2-usw2 <-> TEST-centralized-router-arch-angel-usw2` (VPC) 161 | - IP Address: `10.0.16.16` (`cluster2` private subnet) 162 | - Destination: 163 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1` 164 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app3-use1 <-> TEST-centralized-router-mystique-use1` (VPC) 165 | - IP Address: `10.1.64.4` (`other1` public subnet) 166 | - Select `Run Route Analysis` 167 | - Forward and Return Paths should both have a `Connected` status.
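- Note: the uncompressed IPv6 addresses in the tests below are just low host numbers inside each subnet's `/64`. An illustrative way to derive them (not part of the demo) is Terraform's `cidrhost()` in `terraform console`:
  ```
  > cidrhost("2600:1f28:3d:c402::/64", 2)
  "2600:1f28:3d:c402::2"
  ```
  This is the compressed form of `2600:1f28:3d:c402:0000:0000:0000:0002` used in Cross-Region Test 1 below.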
168 |
169 | - IPv6:
170 | - Cross-Region Test 1 (use1a to use2c)
171 | - Source:
172 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1`
173 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general3-use1 <-> TEST-centralized-router-mystique-use1` (VPC)
174 | - IP Address: `2600:1f28:3d:c402:0000:0000:0000:0002` (`haproxy1` public subnet)
175 | - Destination:
176 | - Transit Gateway: Choose `TEST-centralized-router-magneto-use2`
177 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general1-use2 <-> TEST-centralized-router-magneto-use2` (VPC)
178 | - IP Address: `2600:1f26:21:c103:0000:0000:0000:0003` (`jenkins2` private subnet)
179 | - Select `Run Route Analysis`
180 | - Forward and Return Paths should both have a `Connected` status.
181 | - Cross-Region Test 2 (use2b to usw2c)
182 | - Source:
183 | - Transit Gateway: Choose `TEST-centralized-router-magneto-use2`
184 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app1-use2 <-> TEST-centralized-router-magneto-use2` (VPC)
185 | - IP Address:
186 | `2600:1f26:21:c003:0000:0000:0000:0004` (`other2` public subnet)
187 | - Destination:
188 | - Transit Gateway: Choose `TEST-centralized-router-arch-angel-usw2`
189 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general2-usw2 <-> TEST-centralized-router-arch-angel-usw2` (VPC)
190 | - IP Address: `2600:1f24:66:c101:0000:0000:0000:0005` (`util1` private subnet)
191 | - Select `Run Route Analysis`
192 | - Forward and Return Paths should both have a `Connected` status.
193 | - Cross-Region Test 3 (usw2b to use1b)
194 | - Source:
195 | - Transit Gateway: Choose `TEST-centralized-router-arch-angel-usw2`
196 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app2-usw2 <-> TEST-centralized-router-arch-angel-usw2` (VPC)
197 | - IP Address: `2600:1f24:66:c006:0000:0000:0000:0006` (`cluster2` private subnet)
198 | - Destination:
199 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1`
200 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app3-use1 <-> TEST-centralized-router-mystique-use1` (VPC)
201 | - IP Address: `2600:1f28:3d:c006:0000:0000:0000:0007` (`other1` public subnet)
202 | - Select `Run Route Analysis`
203 | - Forward and Return Paths should both have a `Connected` status.
204 |
205 | Several other routes can be validated, try them out!
206 |
207 | ### Tear down
208 | - `terraform destroy` (long pause)
209 | - Full teardown (destroy) works as of AWS provider 5.61.0, but the VPC destroy in the last step will take about 10-30 min to finish deleting cleanly while it waits for AWS to release the IPAM pool CIDRs. Once the destroy completes, you can immediately rebuild with the same cidrs.
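- Optional: to confirm AWS has actually released the IPAM pool CIDRs before rebuilding, you can inspect the pool allocations with the AWS CLI (a minimal sketch; the pool id below is a placeholder for your own):
- `aws ec2 describe-ipam-pools --region us-west-2 --query 'IpamPools[].[IpamPoolId,Description]'` lists pool ids by description (ie `ipv4-test-usw2`)
- `aws ec2 get-ipam-pool-allocations --region us-west-2 --ipam-pool-id ipam-pool-0123456789abcdef0` returning an empty `IpamPoolAllocations` list means the VPC CIDRs have been released and the demo can be re-applied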
210 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/blackhole_cidrs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | blackhole = { 3 | cidrs = ["172.16.65.0/24"] # app1 jenkins1 4 | ipv6_cidrs = ["2600:1f26:21:c400::/64"] # app1 test1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/centralized_router_use1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.6" 4 | 5 | providers = { 6 | aws = aws.use1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "mystique" 13 | amazon_side_asn = 64519 14 | vpcs = module.vpcs_use1 15 | blackhole = local.blackhole 16 | } 17 | } 18 | 19 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/centralized_router_use2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.6" 4 | 5 | providers = { 6 | aws = aws.use2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "magneto" 13 | amazon_side_asn = 64520 14 | vpcs = module.vpcs_use2 15 | blackhole = local.blackhole 16 | } 17 | } 18 | 19 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/centralized_router_usw2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_usw2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.6" 4 | 5 | providers = { 6 | aws = aws.usw2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "arch-angel" 13 | amazon_side_asn = 64521 14 | vpcs = module.vpcs_usw2 15 | blackhole = local.blackhole 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/full_mesh_trio.tf: -------------------------------------------------------------------------------- 1 | module "full_mesh_trio" { 2 | source = "JudeQuintana/full-mesh-trio/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws.one = aws.use1 7 | aws.two = aws.use2 8 | aws.three = aws.usw2 9 | } 10 | 11 | env_prefix = var.env_prefix 12 | full_mesh_trio = { 13 | one = { 14 | centralized_router = module.centralized_router_use1 15 | } 16 | two = { 17 | centralized_router = module.centralized_router_use2 18 | } 19 | three = { 20 | centralized_router = module.centralized_router_usw2 21 | } 22 | } 23 | } 24 | 25 | output "full_mesh_trio" { 26 | value = module.full_mesh_trio 27 | } 28 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/providers.tf: -------------------------------------------------------------------------------- 1 | # base region 2 | provider "aws" { 3 | region = "us-west-2" 4 | } 5 | 6 | provider "aws" { 7 | alias = "use1" 8 | region = "us-east-1" 9 | } 10 | 11 | provider "aws" { 12 | alias = "use2" 13 | region = "us-east-2" 14 | } 15 | 16 | provider "aws" { 17 | alias = "usw2" 18 | region = 
"us-west-2" 19 | } 20 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/security_group_rules.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # allow all ssh and ping communication between all VPCs within each region's intra-vpc security group 3 | security_group_rules = [ 4 | { 5 | label = "ssh" 6 | protocol = "tcp" 7 | from_port = 22 8 | to_port = 22 9 | }, 10 | { 11 | label = "ping" 12 | protocol = "icmp" 13 | from_port = 8 14 | to_port = 0 15 | } 16 | ] 17 | 18 | intra_vpc_security_group_rules = { for r in local.security_group_rules : r.label => r } 19 | } 20 | 21 | module "intra_vpc_security_group_rules_use1" { 22 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 23 | version = "1.0.1" 24 | 25 | providers = { 26 | aws = aws.use1 27 | } 28 | 29 | for_each = local.intra_vpc_security_group_rules 30 | 31 | env_prefix = var.env_prefix 32 | region_az_labels = var.region_az_labels 33 | intra_vpc_security_group_rule = { 34 | rule = each.value 35 | vpcs = module.vpcs_use1 36 | } 37 | } 38 | 39 | module "intra_vpc_security_group_rules_use2" { 40 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 41 | version = "1.0.1" 42 | 43 | providers = { 44 | aws = aws.use2 45 | } 46 | 47 | for_each = local.intra_vpc_security_group_rules 48 | 49 | env_prefix = var.env_prefix 50 | region_az_labels = var.region_az_labels 51 | intra_vpc_security_group_rule = { 52 | rule = each.value 53 | vpcs = module.vpcs_use2 54 | } 55 | } 56 | 57 | module "intra_vpc_security_group_rules_usw2" { 58 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 59 | version = "1.0.1" 60 | 61 | providers = { 62 | aws = aws.usw2 63 | } 64 | 65 | for_each = local.intra_vpc_security_group_rules 66 | 67 | env_prefix = var.env_prefix 68 | region_az_labels = var.region_az_labels 69 | intra_vpc_security_group_rule = { 70 | rule = each.value 71 | vpcs = module.vpcs_usw2 72 | } 73 | } 74 | 75 | ## allow all ssh and ping communication between all VPCs across regions in each intra-vpc security group 76 | module "full_mesh_intra_vpc_security_group_rules" { 77 | source = "JudeQuintana/full-mesh-intra-vpc-security-group-rules/aws" 78 | version = "1.0.1" 79 | 80 | providers = { 81 | aws.one = aws.use1 82 | aws.two = aws.use2 83 | aws.three = aws.usw2 84 | } 85 | 86 | env_prefix = var.env_prefix 87 | region_az_labels = var.region_az_labels 88 | full_mesh_intra_vpc_security_group_rules = { 89 | one = { 90 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use1 91 | } 92 | two = { 93 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use2 94 | } 95 | three = { 96 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_usw2 97 | } 98 | } 99 | } 100 | 101 | # IPv6 102 | locals { 103 | ipv6_security_group_rules = [ 104 | { 105 | label = "ssh6" 106 | protocol = "tcp" 107 | from_port = 22 108 | to_port = 22 109 | }, 110 | { 111 | label = "ping6" 112 | protocol = "icmpv6" 113 | from_port = -1 114 | to_port = -1 115 | } 116 | ] 117 | 118 | ipv6_intra_vpc_security_group_rules = { for r in local.ipv6_security_group_rules : r.label => r } 119 | } 120 | 121 | module "ipv6_intra_vpc_security_group_rules_use1" { 122 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 123 | version = "1.0.1" 124 | 125 | providers = { 126 | aws = aws.use1 127 | } 128 | 129 | for_each = local.ipv6_intra_vpc_security_group_rules 130 | 131 | env_prefix = var.env_prefix 132 | 
region_az_labels = var.region_az_labels 133 | ipv6_intra_vpc_security_group_rule = { 134 | rule = each.value 135 | vpcs = module.vpcs_use1 136 | } 137 | } 138 | 139 | module "ipv6_intra_vpc_security_group_rules_use2" { 140 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 141 | version = "1.0.1" 142 | 143 | providers = { 144 | aws = aws.use2 145 | } 146 | 147 | for_each = local.ipv6_intra_vpc_security_group_rules 148 | 149 | env_prefix = var.env_prefix 150 | region_az_labels = var.region_az_labels 151 | ipv6_intra_vpc_security_group_rule = { 152 | rule = each.value 153 | vpcs = module.vpcs_use2 154 | } 155 | } 156 | 157 | module "ipv6_intra_vpc_security_group_rules_usw2" { 158 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 159 | version = "1.0.1" 160 | 161 | providers = { 162 | aws = aws.usw2 163 | } 164 | 165 | for_each = local.ipv6_intra_vpc_security_group_rules 166 | 167 | env_prefix = var.env_prefix 168 | region_az_labels = var.region_az_labels 169 | ipv6_intra_vpc_security_group_rule = { 170 | rule = each.value 171 | vpcs = module.vpcs_usw2 172 | } 173 | } 174 | 175 | module "ipv6_full_mesh_intra_vpc_security_group_rules" { 176 | source = "JudeQuintana/ipv6-full-mesh-intra-vpc-security-group-rules/aws" 177 | version = "1.0.0" 178 | 179 | providers = { 180 | aws.one = aws.use1 181 | aws.two = aws.use2 182 | aws.three = aws.usw2 183 | } 184 | 185 | env_prefix = var.env_prefix 186 | region_az_labels = var.region_az_labels 187 | ipv6_full_mesh_intra_vpc_security_group_rules = { 188 | one = { 189 | ipv6_intra_vpc_security_group_rules = module.ipv6_intra_vpc_security_group_rules_use1 190 | } 191 | two = { 192 | ipv6_intra_vpc_security_group_rules = module.ipv6_intra_vpc_security_group_rules_use2 193 | } 194 | three = { 195 | ipv6_intra_vpc_security_group_rules = module.ipv6_intra_vpc_security_group_rules_usw2 196 | } 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "region_az_labels" { 8 | description = "Update this map with regions and AZs that will be in use for short name labeling" 9 | type = map(string) 10 | default = { 11 | us-west-2 = "usw2" 12 | us-west-2a = "usw2a" 13 | us-west-2b = "usw2b" 14 | us-west-2c = "usw2c" 15 | us-east-1 = "use1" 16 | us-east-1a = "use1a" 17 | us-east-1b = "use1b" 18 | us-east-1c = "use1c" 19 | us-east-2 = "use2" 20 | us-east-2a = "use2a" 21 | us-east-2b = "use2b" 22 | us-east-2c = "use2c" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpc_endpoint_service_name_fmt = "com.amazonaws.%s.s3" 4 | vpc_endpoint_type = "Gateway" 5 | 6 | vpcs_use1_with_private_route_table_ids = { for this in module.vpcs_use1 
: this.name => this if length(this.private_route_table_ids) > 0 } 7 | vpcs_use2_with_private_route_table_ids = { for this in module.vpcs_use2 : this.name => this if length(this.private_route_table_ids) > 0 } 8 | vpcs_usw2_with_private_route_table_ids = { for this in module.vpcs_usw2 : this.name => this if length(this.private_route_table_ids) > 0 } 9 | } 10 | 11 | resource "aws_vpc_endpoint" "s3_use1" { 12 | provider = aws.use1 13 | 14 | for_each = local.vpcs_use1_with_private_route_table_ids 15 | 16 | vpc_id = each.value.id 17 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 18 | vpc_endpoint_type = local.vpc_endpoint_type 19 | route_table_ids = each.value.private_route_table_ids 20 | } 21 | 22 | resource "aws_vpc_endpoint" "s3_use2" { 23 | provider = aws.use2 24 | 25 | for_each = local.vpcs_use2_with_private_route_table_ids 26 | 27 | vpc_id = each.value.id 28 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 29 | vpc_endpoint_type = local.vpc_endpoint_type 30 | route_table_ids = each.value.private_route_table_ids 31 | } 32 | 33 | resource "aws_vpc_endpoint" "s3_usw2" { 34 | provider = aws.usw2 35 | 36 | for_each = local.vpcs_usw2_with_private_route_table_ids 37 | 38 | vpc_id = each.value.id 39 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 40 | vpc_endpoint_type = local.vpc_endpoint_type 41 | route_table_ids = each.value.private_route_table_ids 42 | } 43 | 44 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/vpc_peering.tf: -------------------------------------------------------------------------------- 1 | # cross region peering, only route specific subnets across peering connection 2 | # more cost effective vs using transit gateway for cross region traffic 3 | module "vpc_peering_deluxe_use1_general3_to_use2_app1" { 4 | source = "JudeQuintana/vpc-peering-deluxe/aws" 5 | version = "1.0.1" 6 | 7 | providers = { 8 | aws.local = aws.use1 9 | aws.peer = aws.use2 10 | } 11 | 12 | env_prefix = var.env_prefix 13 | vpc_peering_deluxe = { 14 | local = { 15 | vpc = lookup(module.vpcs_use1, "general3") 16 | only_route = { 17 | subnet_cidrs = ["192.168.65.0/24"] 18 | ipv6_subnet_cidrs = ["2600:1f28:3d:c400::/64"] 19 | } 20 | } 21 | peer = { 22 | vpc = lookup(module.vpcs_use2, "app1") 23 | only_route = { 24 | subnet_cidrs = ["172.16.68.0/28"] 25 | ipv6_subnet_cidrs = ["2600:1f26:21:c004::/64"] 26 | } 27 | } 28 | } 29 | } 30 | 31 | # intra region vpc peering, route all subnets across peering connection 32 | # more cost effective vs using transit gateway when cidr traffic is within same AZ. 
33 | module "vpc_peering_deluxe_usw2_app2_to_usw2_general2" { 34 | source = "JudeQuintana/vpc-peering-deluxe/aws" 35 | version = "1.0.1" 36 | 37 | providers = { 38 | aws.local = aws.usw2 39 | aws.peer = aws.usw2 40 | } 41 | 42 | env_prefix = var.env_prefix 43 | vpc_peering_deluxe = { 44 | local = { 45 | vpc = lookup(module.vpcs_usw2, "app2") 46 | } 47 | peer = { 48 | vpc = lookup(module.vpcs_usw2, "general2") 49 | } 50 | } 51 | } 52 | 53 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/vpcs_use1.tf: -------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier) 2 | # main ipam in usw2 with a pool for use1 locale 3 | data "aws_vpc_ipam_pool" "ipv4_use1" { 4 | provider = aws.use1 5 | 6 | filter { 7 | name = "description" 8 | values = ["ipv4-test-use1"] 9 | } 10 | filter { 11 | name = "address-family" 12 | values = ["ipv4"] 13 | } 14 | } 15 | 16 | data "aws_vpc_ipam_pool" "ipv6_use1" { 17 | provider = aws.use1 18 | 19 | filter { 20 | name = "description" 21 | values = ["ipv6-test-use1"] 22 | } 23 | filter { 24 | name = "address-family" 25 | values = ["ipv6"] 26 | } 27 | } 28 | 29 | locals { 30 | ipv4_ipam_pool_use1 = data.aws_vpc_ipam_pool.ipv4_use1 31 | ipv6_ipam_pool_use1 = data.aws_vpc_ipam_pool.ipv6_use1 32 | 33 | tiered_vpcs_use1 = [ 34 | { 35 | name = "app3" 36 | ipv4 = { 37 | network_cidr = "10.0.64.0/18" 38 | secondary_cidrs = ["10.1.64.0/20"] 39 | ipam_pool = local.ipv4_ipam_pool_use1 40 | } 41 | ipv6 = { 42 | network_cidr = "2600:1f28:3d:c000::/56" 43 | ipam_pool = local.ipv6_ipam_pool_use1 44 | } 45 | azs = { 46 | a = { 47 | private_subnets = [ 48 | { name = "cluster1", cidr = "10.0.64.0/24", ipv6_cidr = "2600:1f28:3d:c000::/64" } 49 | ] 50 | # Enable a NAT Gateway for all private subnets in the same AZ 51 | # by adding the "natgw = true" attribute to any public subnet 52 | public_subnets = [ 53 | { name = "random1", cidr = "10.0.66.0/28", ipv6_cidr = "2600:1f28:3d:c001::/64", special = true }, 54 | { name = "haproxy1", cidr = "10.0.67.64/26", ipv6_cidr = "2600:1f28:3d:c002::/64" } 55 | ] 56 | } 57 | b = { 58 | private_subnets = [ 59 | { name = "cluster2", cidr = "10.0.70.0/24", ipv6_cidr = "2600:1f28:3d:c003::/64" } 60 | ] 61 | public_subnets = [ 62 | { name = "random2", cidr = "10.0.72.0/28", ipv6_cidr = "2600:1f28:3d:c004::/64", special = true }, 63 | { name = "haproxy2", cidr = "10.0.73.64/26", ipv6_cidr = "2600:1f28:3d:c005::/64" }, 64 | #secondary subnet 65 | { name = "other1", cidr = "10.1.64.0/24", ipv6_cidr = "2600:1f28:3d:c006::/64" } 66 | ] 67 | } 68 | } 69 | }, 70 | { 71 | name = "general3" 72 | ipv4 = { 73 | network_cidr = "192.168.64.0/18" 74 | secondary_cidrs = ["192.168.128.0/20"] 75 | ipam_pool = local.ipv4_ipam_pool_use1 76 | } 77 | ipv6 = { 78 | network_cidr = "2600:1f28:3d:c400::/56" 79 | ipam_pool = local.ipv6_ipam_pool_use1 80 | } 81 | azs = { 82 | a = { 83 | private_subnets = [ 84 | { name = "cluster4", cidr = "192.168.65.0/24", ipv6_cidr = "2600:1f28:3d:c400::/64" } 85 | ] 86 | public_subnets = [ 87 | { name = "random2", cidr = "192.168.67.0/28", ipv6_cidr = "2600:1f28:3d:c401::/64", special = true }, 88 | { name = "haproxy1", cidr = "192.168.68.64/26", ipv6_cidr = "2600:1f28:3d:c402::/64" } 89 | ] 90 | } 91 | c = { 92 | private_subnets = [ 93 | { name = "experiment1", cidr = "192.168.70.0/24", ipv6_cidr = "2600:1f28:3d:c403::/64" } 94 | ] 95 | public_subnets = [ 96 | { name = "random3", cidr = "192.168.71.0/28", ipv6_cidr = 
"2600:1f28:3d:c404::/64", special = true }, 97 | { name = "haproxy3", cidr = "192.168.72.64/26", ipv6_cidr = "2600:1f28:3d:c405::/64" }, 98 | # secondary subnet 99 | { name = "haproxy2", cidr = "192.168.128.0/24", ipv6_cidr = "2600:1f28:3d:c406::/64" } 100 | ] 101 | } 102 | } 103 | } 104 | ] 105 | } 106 | 107 | module "vpcs_use1" { 108 | source = "JudeQuintana/tiered-vpc-ng/aws" 109 | version = "1.0.7" 110 | 111 | providers = { 112 | aws = aws.use1 113 | } 114 | 115 | for_each = { for t in local.tiered_vpcs_use1 : t.name => t } 116 | 117 | env_prefix = var.env_prefix 118 | region_az_labels = var.region_az_labels 119 | tiered_vpc = each.value 120 | } 121 | 122 | output "vpcs_use1_natgw_eips_per_az" { 123 | value = { for v in module.vpcs_use1 : v.name => v.public_natgw_az_to_eip } 124 | } 125 | 126 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/vpcs_use2.tf: -------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier) 2 | # main ipam in usw2 with a pool for use2 locale 3 | data "aws_vpc_ipam_pool" "ipv4_use2" { 4 | provider = aws.use2 5 | 6 | filter { 7 | name = "description" 8 | values = ["ipv4-test-use2"] 9 | } 10 | filter { 11 | name = "address-family" 12 | values = ["ipv4"] 13 | } 14 | } 15 | 16 | data "aws_vpc_ipam_pool" "ipv6_use2" { 17 | provider = aws.use2 18 | 19 | filter { 20 | name = "description" 21 | values = ["ipv6-test-use2"] 22 | } 23 | filter { 24 | name = "address-family" 25 | values = ["ipv6"] 26 | } 27 | } 28 | 29 | locals { 30 | ipv4_ipam_pool_use2 = data.aws_vpc_ipam_pool.ipv4_use2 31 | ipv6_ipam_pool_use2 = data.aws_vpc_ipam_pool.ipv6_use2 32 | 33 | tiered_vpcs_use2 = [ 34 | { 35 | name = "app1" 36 | ipv4 = { 37 | network_cidr = "172.16.64.0/18" 38 | secondary_cidrs = ["172.16.192.0/20"] 39 | ipam_pool = local.ipv4_ipam_pool_use2 40 | } 41 | ipv6 = { 42 | network_cidr = "2600:1f26:21:c000::/56" 43 | secondary_cidrs = ["2600:1f26:21:c400::/56"] 44 | ipam_pool = local.ipv6_ipam_pool_use2 45 | } 46 | azs = { 47 | b = { 48 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az 49 | private_subnets = [ 50 | { name = "jenkins1", cidr = "172.16.65.0/24", ipv6_cidr = "2600:1f26:21:c001::/64" }, 51 | # secondary cidr 52 | { name = "experiment1", cidr = "172.16.192.0/24", ipv6_cidr = "2600:1f26:21:c004::/64" } 53 | ] 54 | public_subnets = [ 55 | { name = "other", cidr = "172.16.68.0/28", ipv6_cidr = "2600:1f26:21:c002::/64", special = true }, 56 | { name = "other2", cidr = "172.16.76.16/28", ipv6_cidr = "2600:1f26:21:c003::/64" }, 57 | # ipv6 secondary cidr 58 | { name = "test1", cidr = "172.16.77.32/28", ipv6_cidr = "2600:1f26:21:c400::/60" }, 59 | #ipv4 secondary cidr and ipv6 secondary cidr 60 | { name = "test2", cidr = "172.16.194.0/24", ipv6_cidr = "2600:1f26:21:c410::/60" } 61 | ] 62 | } 63 | } 64 | }, 65 | { 66 | name = "general1" 67 | ipv4 = { 68 | network_cidr = "172.16.128.0/18" 69 | secondary_cidrs = ["172.16.208.0/20"] 70 | ipam_pool = local.ipv4_ipam_pool_use2 71 | } 72 | ipv6 = { 73 | network_cidr = "2600:1f26:21:c100::/56" 74 | ipam_pool = local.ipv6_ipam_pool_use2 75 | } 76 | azs = { 77 | a = { 78 | private_subnets = [ 79 | { name = "artifacts2", cidr = "172.16.129.0/24", ipv6_cidr = "2600:1f26:21:c101::/64" } 80 | ] 81 | public_subnets = [ 82 | { name = "random1", cidr = "172.16.131.0/28", ipv6_cidr = "2600:1f26:21:c102::/64", special = true } 83 | ] 84 | } 85 | c = { 86 | private_subnets = [ 87 | { name = 
"jenkins2", cidr = "172.16.132.0/24", ipv6_cidr = "2600:1f26:21:c103::/64", special = true } 88 | ] 89 | public_subnets = [ 90 | { name = "random2", cidr = "172.16.133.0/28", ipv6_cidr = "2600:1f26:21:c104::/64" }, 91 | # secondary cidr 92 | { name = "random3", cidr = "172.16.208.0/24", ipv6_cidr = "2600:1f26:21:c105::/64" } 93 | ] 94 | } 95 | } 96 | } 97 | ] 98 | } 99 | 100 | module "vpcs_use2" { 101 | source = "JudeQuintana/tiered-vpc-ng/aws" 102 | version = "1.0.7" 103 | 104 | providers = { 105 | aws = aws.use2 106 | } 107 | 108 | for_each = { for t in local.tiered_vpcs_use2 : t.name => t } 109 | 110 | env_prefix = var.env_prefix 111 | region_az_labels = var.region_az_labels 112 | tiered_vpc = each.value 113 | } 114 | 115 | output "vpcs_use2_natgw_eips_per_az" { 116 | value = { for v in module.vpcs_use2 : v.name => v.public_natgw_az_to_eip } 117 | } 118 | -------------------------------------------------------------------------------- /dual_stack_full_mesh_trio_demo/vpcs_usw2.tf: -------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier) 2 | # main ipam in usw2 with a pool for usw2 locale 3 | data "aws_vpc_ipam_pool" "ipv4_usw2" { 4 | provider = aws.usw2 5 | 6 | filter { 7 | name = "description" 8 | values = ["ipv4-test-usw2"] 9 | } 10 | filter { 11 | name = "address-family" 12 | values = ["ipv4"] 13 | } 14 | } 15 | 16 | data "aws_vpc_ipam_pool" "ipv6_usw2" { 17 | provider = aws.usw2 18 | 19 | filter { 20 | name = "description" 21 | values = ["ipv6-test-usw2"] 22 | } 23 | filter { 24 | name = "address-family" 25 | values = ["ipv6"] 26 | } 27 | } 28 | 29 | locals { 30 | ipv4_ipam_pool_usw2 = data.aws_vpc_ipam_pool.ipv4_usw2 31 | ipv6_ipam_pool_usw2 = data.aws_vpc_ipam_pool.ipv6_usw2 32 | } 33 | 34 | # ipv4 and ipv6 must use an ipam pool 35 | # can start with ipv4 only and then add ipv6 later if needed. 36 | # vpcs with an ipv4 network cidr /18 provides /20 subnet for each AZ. 
37 | locals { 38 | tiered_vpcs_usw2 = [ 39 | { 40 | name = "app2" 41 | ipv4 = { 42 | network_cidr = "10.0.0.0/18" 43 | secondary_cidrs = ["10.1.0.0/20"] 44 | ipam_pool = local.ipv4_ipam_pool_usw2 45 | } 46 | ipv6 = { 47 | network_cidr = "2600:1f24:66:c000::/56" 48 | ipam_pool = local.ipv6_ipam_pool_usw2 49 | } 50 | azs = { 51 | a = { 52 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az 53 | private_subnets = [ 54 | { name = "another", cidr = "10.0.9.0/24", ipv6_cidr = "2600:1f24:66:c008::/64" } 55 | ] 56 | # Enable a NAT Gateway for all private subnets in the same AZ 57 | # by adding the `natgw = true` attribute to any public subnet 58 | # `special` and `natgw` can also be enabled together on a public subnet 59 | public_subnets = [ 60 | { name = "random1", cidr = "10.0.3.0/28", ipv6_cidr = "2600:1f24:66:c000::/64" }, 61 | { name = "haproxy1", cidr = "10.0.4.0/26", ipv6_cidr = "2600:1f24:66:c001::/64" }, 62 | { name = "other", cidr = "10.0.10.0/28", ipv6_cidr = "2600:1f24:66:c002::/64", special = true } 63 | ] 64 | isolated_subnets = [ 65 | # secondary cidr 66 | { name = "hidden1", cidr = "10.1.13.0/24", ipv6_cidr = "2600:1f24:66:c050::/60" } 67 | ] 68 | } 69 | b = { 70 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az 71 | private_subnets = [ 72 | { name = "cluster2", cidr = "10.0.16.0/24", ipv6_cidr = "2600:1f24:66:c006::/64" }, 73 | { name = "random2", cidr = "10.0.17.0/24", ipv6_cidr = "2600:1f24:66:c007::/64", special = true }, 74 | # secondary subnet 75 | { name = "random3", cidr = "10.1.0.0/24", ipv6_cidr = "2600:1f24:66:c009::/64" } 76 | ] 77 | } 78 | } 79 | }, 80 | { 81 | name = "general2" 82 | ipv4 = { 83 | network_cidr = "192.168.0.0/18" 84 | secondary_cidrs = ["192.168.144.0/20"] 85 | ipam_pool = local.ipv4_ipam_pool_usw2 86 | } 87 | ipv6 = { 88 | network_cidr = "2600:1f24:66:c100::/56" 89 | ipam_pool = local.ipv6_ipam_pool_usw2 90 | } 91 | azs = { 92 | c = { 93 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az 94 | private_subnets = [ 95 | { name = "util2", cidr = "192.168.10.0/24", ipv6_cidr = "2600:1f24:66:c100::/64", special = true }, 96 | { name = "util1", cidr = "192.168.11.0/24", ipv6_cidr = "2600:1f24:66:c101::/64" } 97 | ] 98 | public_subnets = [ 99 | { name = "other2", cidr = "192.168.14.0/28", ipv6_cidr = "2600:1f24:66:c108::/64" }, 100 | # secondary subnet 101 | { name = "other3", cidr = "192.168.144.0/24", ipv6_cidr = "2600:1f24:66:c109::/64" } 102 | ] 103 | } 104 | } 105 | } 106 | ] 107 | } 108 | 109 | module "vpcs_usw2" { 110 | source = "JudeQuintana/tiered-vpc-ng/aws" 111 | version = "1.0.7" 112 | 113 | providers = { 114 | aws = aws.usw2 115 | } 116 | 117 | for_each = { for t in local.tiered_vpcs_usw2 : t.name => t } 118 | 119 | env_prefix = var.env_prefix 120 | region_az_labels = var.region_az_labels 121 | tiered_vpc = each.value 122 | } 123 | 124 | output "vpcs_usw2_natgw_eips_per_az" { 125 | value = { for v in module.vpcs_usw2 : v.name => v.public_natgw_az_to_eip } 126 | } 127 | 128 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 
2 | # Manual edits may be lost in future updates.
3 |
4 | provider "registry.terraform.io/hashicorp/aws" {
5 | version = "5.95.0"
6 | constraints = ">= 4.20.0, >= 5.61.0, ~> 5.95"
7 | hashes = [
8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=",
9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4",
10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743",
11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5",
12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7",
13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b",
14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6",
15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf",
16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626",
17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82",
19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66",
20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1",
21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0",
22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d",
23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a",
24 | ]
25 | }
26 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/centralized_router.tf: --------------------------------------------------------------------------------
1 | # This TGW Centralized router module will attach all dual stack vpcs (an attachment for each AZ) to one TGW
2 | # and route them to each other for the VPC IPv4 network cidrs, IPv4 secondary cidrs and IPv6 cidrs.
3 | # hub and spoke
4 | module "centralized_router" {
5 | source = "JudeQuintana/centralized-router/aws"
6 | version = "1.0.6"
7 |
8 | env_prefix = var.env_prefix
9 | region_az_labels = var.region_az_labels
10 | centralized_router = {
11 | name = "gambit"
12 | amazon_side_asn = 64512
13 | vpcs = module.vpcs
14 | blackhole = {
15 | cidrs = ["172.16.8.0/24"]
16 | ipv6_cidrs = ["2600:1f24:66:c109::/64"]
17 | }
18 | }
19 | }
20 |
21 | output "centralized_router" {
22 | value = module.centralized_router
23 | }
24 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/instances.tf: --------------------------------------------------------------------------------
1 | locals {
2 | # create a vpc_name map so I can call vpcs by name so I don't have
3 | # to type it as a string in places that I need it.
4 | # ie local.tiered_vpc_names.app will be "app"
5 | tiered_vpc_names = { for v in module.vpcs : v.name => v.name }
6 |
7 | instances = [
8 | {
9 | # app-public
10 | name = format("%s-public", local.tiered_vpc_names.app)
11 | # lookup the public subnet id for the 'random1' subnet in the 'a' AZ for the 'app' VPC
12 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.app).public_subnet_name_to_subnet_id, "random1")
13 | vpc_security_group_ids = [
14 | lookup(module.vpcs, local.tiered_vpc_names.app).default_security_group_id,
15 | lookup(module.vpcs, local.tiered_vpc_names.app).intra_vpc_security_group_id
16 | ]
17 | },
18 | {
19 | # app-isolated
20 | name = format("%s-isolated", local.tiered_vpc_names.app)
21 | # lookup the isolated subnet id for the 'hidden1' subnet in AZ 'a' for the 'app' VPC
22 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.app).isolated_subnet_name_to_subnet_id, "hidden1")
23 | vpc_security_group_ids = [
24 | lookup(module.vpcs, local.tiered_vpc_names.app).default_security_group_id,
25 | lookup(module.vpcs, local.tiered_vpc_names.app).intra_vpc_security_group_id
26 | ]
27 | },
28 | {
29 | # cicd-private
30 | name = format("%s-private", local.tiered_vpc_names.cicd)
31 | # lookup the private subnet id for the 'jenkins1' subnet in AZ 'b' for the 'cicd' VPC
32 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.cicd).private_subnet_name_to_subnet_id, "jenkins1")
33 | vpc_security_group_ids = [
34 | lookup(module.vpcs, local.tiered_vpc_names.cicd).default_security_group_id,
35 | lookup(module.vpcs, local.tiered_vpc_names.cicd).intra_vpc_security_group_id
36 | ]
37 | },
38 | {
39 | # general-private
40 | name = format("%s-private", local.tiered_vpc_names.general)
41 | # lookup the private subnet id for the 'util2' subnet in AZ 'c' for the 'general' VPC
42 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.general).private_subnet_name_to_subnet_id, "util2")
43 | vpc_security_group_ids = [
44 | lookup(module.vpcs, local.tiered_vpc_names.general).default_security_group_id,
45 | lookup(module.vpcs, local.tiered_vpc_names.general).intra_vpc_security_group_id
46 | ]
47 | }
48 | ]
49 | }
50 |
51 | data "aws_ami" "al2023" { # NOTE: despite the data source name, the name filter below currently matches Amazon Linux 2 (amzn2) AMIs
52 | owners = ["amazon"]
53 | most_recent = true
54 |
55 | filter {
56 | name = "name"
57 | values = ["amzn2-ami-kernel-5.10-hvm-2.0.*"]
58 | }
59 |
60 | filter {
61 | name = "architecture"
62 | values = ["x86_64"]
63 | }
64 | }
65 |
66 | # The .ssh/config is forwarding the private key to any host
67 | # so you can easily ssh to each instance since instances are
68 | # ssh key only.
69 | # It's a very insecure configuration and is used just for this demo
70 | # and shouldn't be used in production.
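# A minimal ~/.ssh/config sketch matching the setup described above (hypothetical
# file contents for this demo; assumes the `my-ec2-key` key pair name from variables.tf):
#
#   Host *
#     ForwardAgent yes
#     IdentityFile ~/.ssh/my-ec2-key.pem
#
# Run `ssh-add ~/.ssh/my-ec2-key.pem` first so the agent has the key to forward.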
71 | resource "aws_instance" "instances" { 72 | for_each = { for i in local.instances : i.name => i } 73 | 74 | ami = data.aws_ami.al2023.id 75 | instance_type = var.base_ec2_instance_attributes.instance_type 76 | key_name = var.base_ec2_instance_attributes.key_name 77 | subnet_id = each.value.subnet_id 78 | ipv6_address_count = 1 79 | vpc_security_group_ids = each.value.vpc_security_group_ids 80 | user_data = < r } 27 | 28 | env_prefix = var.env_prefix 29 | region_az_labels = var.region_az_labels 30 | intra_vpc_security_group_rule = { 31 | rule = each.value 32 | vpcs = module.vpcs 33 | } 34 | } 35 | 36 | # IPv6 37 | locals { 38 | ipv6_intra_vpc_security_group_rules = [ 39 | { 40 | label = "ssh6" 41 | protocol = "tcp" 42 | from_port = 22 43 | to_port = 22 44 | }, 45 | { 46 | label = "ping6" 47 | protocol = "icmpv6" 48 | from_port = -1 49 | to_port = -1 50 | } 51 | ] 52 | } 53 | 54 | module "ipv6_intra_vpc_security_group_rules" { 55 | source = "JudeQuintana/ipv6-intra-vpc-security-group-rule/aws" 56 | version = "1.0.1" 57 | 58 | for_each = { for r in local.ipv6_intra_vpc_security_group_rules : r.label => r } 59 | 60 | env_prefix = var.env_prefix 61 | region_az_labels = var.region_az_labels 62 | ipv6_intra_vpc_security_group_rule = { 63 | rule = each.value 64 | vpcs = module.vpcs 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "base_ec2_instance_attributes" { 8 | description = "base attributes for building in us-west-2" 9 | type = object({ 10 | key_name = string 11 | instance_type = string 12 | }) 13 | default = { 14 | key_name = "my-ec2-key" # EC2 key pair name to use when launching an instance in us-west-2 15 | instance_type = "t2.micro" 16 | } 17 | } 18 | 19 | variable "region_az_labels" { 20 | description = "Update this map with regions and AZs that will be in use for short name labeling" 21 | type = map(string) 22 | default = { 23 | us-west-2 = "usw2" 24 | us-west-2a = "usw2a" 25 | us-west-2b = "usw2b" 26 | us-west-2c = "usw2c" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpcs_with_private_route_table_ids = { for this in module.vpcs : this.name => this if length(this.private_route_table_ids) > 0 } 4 | } 5 | 6 | resource "aws_vpc_endpoint" "s3" { 7 | for_each = local.vpcs_with_private_route_table_ids 8 | 9 | vpc_id = each.value.id 10 | service_name = format("com.amazonaws.%s.s3", each.value.region) 11 | vpc_endpoint_type = "Gateway" 12 | route_table_ids = each.value.private_route_table_ids 13 | } 14 | 15 | -------------------------------------------------------------------------------- /dual_stack_networking_trifecta_demo/vpcs.tf: 
-------------------------------------------------------------------------------- 1 | # ipam was set up manually (advanced tier)
2 | data "aws_vpc_ipam_pool" "ipv4" {
3 | filter {
4 | name = "description"
5 | values = ["ipv4-test-usw2"]
6 | }
7 | filter {
8 | name = "address-family"
9 | values = ["ipv4"]
10 | }
11 | }
12 |
13 | data "aws_vpc_ipam_pool" "ipv6" {
14 | filter {
15 | name = "description"
16 | values = ["ipv6-test-usw2"]
17 | }
18 | filter {
19 | name = "address-family"
20 | values = ["ipv6"]
21 | }
22 | }
23 |
24 | locals {
25 | ipv4_ipam_pool = data.aws_vpc_ipam_pool.ipv4
26 | ipv6_ipam_pool = data.aws_vpc_ipam_pool.ipv6
27 |
28 | # vpcs with an ipv4 network cidr /18 provides /20 subnet for each AZ.
29 | tiered_vpcs = [
30 | {
31 | name = "app"
32 | ipv4 = {
33 | network_cidr = "10.0.0.0/18"
34 | secondary_cidrs = ["10.1.0.0/20"]
35 | ipam_pool = local.ipv4_ipam_pool
36 | }
37 | ipv6 = {
38 | network_cidr = "2600:1f24:66:c000::/56"
39 | secondary_cidrs = ["2600:1f24:66:c800::/56"]
40 | ipam_pool = local.ipv6_ipam_pool
41 | }
42 | azs = {
43 | a = {
44 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az
45 | private_subnets = [
46 | { name = "another", cidr = "10.0.9.0/24", ipv6_cidr = "2600:1f24:66:c008::/64" },
47 | # secondary ipv6 cidr
48 | { name = "another2", cidr = "10.0.14.0/24", ipv6_cidr = "2600:1f24:66:c810::/60" },
49 | # secondary cidr and secondary ipv6 cidr
50 | { name = "another3", cidr = "10.1.15.0/24", ipv6_cidr = "2600:1f24:66:c820::/60" }
51 | ]
52 | # Enable a NAT Gateway for all private subnets in the same AZ
53 | # by adding the "natgw = true" attribute to any public subnet
54 | # "special" and "natgw" can also be enabled together on a public subnet
55 | public_subnets = [
56 | { name = "random1", cidr = "10.0.3.0/28", ipv6_cidr = "2600:1f24:66:c000::/64" },
57 | { name = "haproxy1", cidr = "10.0.4.0/26", ipv6_cidr = "2600:1f24:66:c001::/64" },
58 | { name = "other", cidr = "10.0.10.0/28", ipv6_cidr = "2600:1f24:66:c002::/64", special = true }
59 | ]
60 | isolated_subnets = [
61 | # secondary cidr
62 | { name = "hidden1", cidr = "10.1.13.0/24", ipv6_cidr = "2600:1f24:66:c850::/60" }
63 | ]
64 | }
65 | b = {
66 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az
67 | private_subnets = [
68 | { name = "cluster2", cidr = "10.0.16.0/24", ipv6_cidr = "2600:1f24:66:c006::/64" },
69 | { name = "random2", cidr = "10.0.17.0/24", ipv6_cidr = "2600:1f24:66:c007::/64" },
70 | # special can be assigned to a secondary cidr subnet and be used as a vpc attachment when passed to the centralized router
71 | { name = "random3", cidr = "10.1.5.0/24", ipv6_cidr = "2600:1f24:66:c009::/64", special = true }
72 | ]
73 | }
74 | }
75 | },
76 | {
77 | name = "general"
78 | ipv4 = {
79 | network_cidr = "192.168.0.0/18"
80 | ipam_pool = local.ipv4_ipam_pool
81 | }
82 | ipv6 = {
83 | network_cidr = "2600:1f24:66:c100::/56"
84 | ipam_pool = local.ipv6_ipam_pool
85 | }
86 | azs = {
87 | c = {
88 | #eigw = true # opt-in ipv6 private subnets to route out eigw per az
89 | private_subnets = [
90 | { name = "util2", cidr = "192.168.10.0/24", ipv6_cidr = "2600:1f24:66:c100::/64", special = true },
91 | { name = "util1", cidr = "192.168.11.0/24", ipv6_cidr = "2600:1f24:66:c101::/64" }
92 | ]
93 | public_subnets = [
94 | { name = "other2", cidr = "192.168.14.0/28", ipv6_cidr = "2600:1f24:66:c108::/64" }
95 | ]
96 | }
97 | }
98 | },
99 | {
100 | name = "cicd"
101 | ipv4 = {
102 | network_cidr = "172.16.0.0/18"
103 | secondary_cidrs = ["172.19.0.0/20"] # aws
recommends not using 172.17.0.0/16 104 | ipam_pool = local.ipv4_ipam_pool 105 | } 106 | ipv6 = { 107 | network_cidr = "2600:1f24:66:c200::/56" 108 | secondary_cidrs = ["2600:1f24:66:c600::/56"] 109 | ipam_pool = local.ipv6_ipam_pool 110 | } 111 | azs = { 112 | b = { 113 | eigw = true # opt-in ipv6 private subnets to route out eigw per az 114 | private_subnets = [ 115 | { name = "jenkins1", cidr = "172.16.5.0/24", ipv6_cidr = "2600:1f24:66:c200::/64" }, 116 | { name = "experiment1", cidr = "172.19.5.0/24", ipv6_cidr = "2600:1f24:66:c202::/64" }, 117 | # secondary ipv6 cidr 118 | { name = "experiment2", cidr = "172.19.9.0/24", ipv6_cidr = "2600:1f24:66:c602::/64" } 119 | ] 120 | public_subnets = [ 121 | { name = "other", cidr = "172.16.8.0/28", ipv6_cidr = "2600:1f24:66:c207::/64", special = true }, 122 | # build natgw in public subnet for private ipv4 subnets to route out igw per az 123 | { name = "natgw", cidr = "172.16.16.16/28", ipv6_cidr = "2600:1f24:66:c208::/64", natgw = true } 124 | ] 125 | } 126 | } 127 | } 128 | ] 129 | } 130 | 131 | module "vpcs" { 132 | source = "JudeQuintana/tiered-vpc-ng/aws" 133 | version = "1.0.7" 134 | 135 | for_each = { for t in local.tiered_vpcs : t.name => t } 136 | 137 | env_prefix = var.env_prefix 138 | region_az_labels = var.region_az_labels 139 | tiered_vpc = each.value 140 | } 141 | 142 | output "vpcs_natgw_eips_per_az" { 143 | value = { for v in module.vpcs : v.name => v.public_natgw_az_to_eip } 144 | } 145 | 146 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 |
4 | provider "registry.terraform.io/hashicorp/aws" {
5 | version = "5.95.0"
6 | constraints = ">= 4.20.0, >= 5.31.0, ~> 5.95"
7 | hashes = [
8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=",
9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4",
10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743",
11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5",
12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7",
13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b",
14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6",
15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf",
16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626",
17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82",
19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66",
20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1",
21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0",
22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d",
23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a",
24 | ]
25 | }
26 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/README.md: --------------------------------------------------------------------------------
1 | # Full Mesh Trio Demo
2 | [Full Mesh Trio module](https://github.com/JudeQuintana/terraform-aws-full-mesh-trio) takes in three Centralized Routers and composes a cross-region TGW full mesh topology from existing hub-and-spoke networks in AWS. It peers and generates routes for TGWs and their respective VPCs.
3 |
4 | The resulting architecture is a full mesh between 3 cross-region hub-and-spoke topologies:
5 | ![full-mesh-trio](https://jq1-io.s3.amazonaws.com/full-mesh-trio/full-mesh-trio-new.png)
6 |
7 | ---
8 |
9 | ### Bonus Update!
10 |
11 | [VPC Peering Deluxe module](https://github.com/JudeQuintana/terraform-aws-vpc-peering-deluxe):
12 | - VPC Peering Deluxe module will create appropriate routes for all subnets in each cross region Tiered VPC-NG by default.
13 | - The module also works for intra-region VPCs.
14 | - Specific subnet cidrs can be selected (instead of the default behavior) to route across the VPC peering connection via the only_route_subnet_cidrs variable list.
15 | - Additional option to allow remote dns resolution too.
16 | - Can be used in tandem with Centralized Router, Super Router, Full Mesh Trio and Mega Mesh for workloads that transfer lots of data, to save on cost compared to routing via TGW (especially intra region).
17 |
18 | Important:
19 | - If you've run this demo before, it's possible that you'll need to run `terraform get -update` to get the updated Tiered VPC-NG outputs needed for VPC Peering Deluxe.
20 |
21 | Cross-region full mesh with cross-region and intra-region VPC peering:
22 | ![full-mesh-trio-with-vpc-peering](https://jq1-io.s3.amazonaws.com/full-mesh-trio/full-mesh-trio-with-two-vpc-peering-examples.png)
23 |
24 | ---
25 |
26 | Related articles:
27 | - Blog Post coming soon...
28 |
29 | Demo:
30 | - Pre-requisite: AWS account, may need to increase your VPC and/or TGW quotas for
31 | each of us-east-1, us-east-2, us-west-2 depending on how many you currently have.
32 | This demo will be creating 6 VPCs (2 in each region) and 3 TGWs (1 in each region).
33 |
34 | 1. It begins:
35 | - `terraform init`
36 |
37 | 2. Apply VPCs (must exist before Centralized Routers):
38 | - `terraform apply -target module.vpcs_use1 -target module.vpcs_use2 -target module.vpcs_usw2`
39 |
40 | 3. Apply S3 Gateways and Full Mesh Intra VPC Security Group Rules (this will automatically apply its dependent modules, the per-region Intra Security Group Rules) to allow EC2 access across VPC regions (ie ssh and ping) for VPCs in a TGW Full Mesh configuration.
41 | - `terraform apply -target aws_vpc_endpoint.s3_use1 -target aws_vpc_endpoint.s3_use2 -target aws_vpc_endpoint.s3_usw2 -target module.full_mesh_intra_vpc_security_group_rules`
42 |
43 | 4. Apply VPC Peering Deluxe and Centralized Routers:
44 | - `terraform apply -target module.vpc_peering_deluxe_use1_general2_to_use2_cicd1 -target module.vpc_peering_deluxe_usw2_app1_to_usw2_general1 -target module.centralized_router_use1 -target module.centralized_router_use2 -target module.centralized_router_usw2`
45 |
46 | 5. Apply Full Mesh Trio:
47 | - `terraform apply -target module.full_mesh_trio`
48 |
49 | Note: You can combine steps 3 through 5 with `terraform apply`.
50 |
51 | Full Mesh Trio is now complete!
52 |
53 | Note: If we were using this in Terraform Cloud then it would be best for each of the module applies above to be in its own separate networking workspace with triggers (for example, if a VPC or AZ is added in its own VPC workspace, apply it and trigger the centralized router workspace to build routes, then trigger full mesh trio).
54 |
55 | Routing and peering validation with AWS Route Analyzer:
56 | - Go to [AWS Network Manager](https://us-west-2.console.aws.amazon.com/networkmanager/home?region=us-east-1#/networks) (free to use)
57 | - Create global network -> `next`
58 | - UNCHECK `Add core network in your global network` or you will be billed extra -> `next`
59 | - Select new global network -> go to `Transit Gateways` -> `Register
60 | Transit Gateway` -> Select TGWs -> `Register Transit Gateway` -> wait until all states say `Available`
61 | - Go to `Transit gateway network` -> `Route Analyzer`
62 | - Cross-Region Test 1 (use1a to use2c)
63 | - Source:
64 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1`
65 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app2-use1 <-> TEST-centralized-router-mystique-use1` (VPC)
66 | - IP Address: `10.0.4.70` (`haproxy1` public subnet)
67 | - Destination:
68 | - Transit Gateway: Choose `TEST-centralized-router-magneto-use2`
69 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-infra1-use2 <-> TEST-centralized-router-magneto-use2` (VPC)
70 | - IP Address: `172.16.16.10` (`jenkins2` private subnet)
71 | - Select `Run Route Analysis`
72 | - Forward and Return Paths should both have a `Connected` status.
73 | - Cross-Region Test 2 (use2a to usw2a)
74 | - Source:
75 | - Transit Gateway: Choose `TEST-centralized-router-magneto-use2`
76 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-cicd1-use2 <-> TEST-centralized-router-magneto-use2` (VPC)
77 | - IP Address: `172.16.6.8` (`jenkins1` private subnet)
78 | - Destination:
79 | - Transit Gateway: Choose `TEST-centralized-router-arch-angel-usw2`
80 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app1-usw2 <-> TEST-centralized-router-arch-angel-usw2` (VPC)
81 | - IP Address: `10.0.19.9` (`random1` public subnet)
82 | - Select `Run Route Analysis`
83 | - Forward and Return Paths should both have a `Connected` status.
84 | - Cross-Region Test 3 (usw2c to use1c) 85 | - Source: 86 | - Transit Gateway: Choose `TEST-centralized-router-arch-angel-usw2` 87 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general1-usw2 <-> TEST-centralized-router-arch-angel-usw2` (VPC) 88 | - IP Address: `192.168.16.3` (`experiment1` private subnet) 89 | - Destination: 90 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1` 91 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general2-use1 <-> TEST-centralized-router-mystique-use1` (VPC) 92 | - IP Address: `192.168.11.4` (`experiment2` private subnet) 93 | - Select `Run Route Analysis` 94 | - Forward and Return Paths should both have a `Connected` status. 95 | Several other routes can be validated, try them out! 96 | 97 | Tear down: 98 | - `terraform destroy` 99 | 100 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/blackhole_cidrs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # blackhole cidr app1 usw2b haproxy2 public subnet on all centralized routers 3 | blackhole_cidrs = ["10.0.31.64/26"] 4 | } 5 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/centralized_router_use1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.use1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "mystique" 13 | amazon_side_asn = 64519 14 | vpcs = module.vpcs_use1 15 | blackhole_cidrs = local.blackhole_cidrs 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/centralized_router_use2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.use2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "magneto" 13 | amazon_side_asn = 64520 14 | vpcs = module.vpcs_use2 15 | blackhole_cidrs = local.blackhole_cidrs 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/centralized_router_usw2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_usw2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.usw2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "arch-angel" 13 | amazon_side_asn = 64521 14 | vpcs = module.vpcs_usw2 15 | blackhole_cidrs = local.blackhole_cidrs 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/full_mesh_trio.tf: -------------------------------------------------------------------------------- 1 | module "full_mesh_trio" { 2 | source = "JudeQuintana/full-mesh-trio/aws" 3 | version = "1.0.0" 4 | 5 | providers = { 6 | aws.one = aws.use1 7 | aws.two = aws.use2 8 | aws.three = aws.usw2 9 | } 10 | 11 | env_prefix = var.env_prefix 12 | full_mesh_trio = { 13 | one = { 14 | centralized_router = 
module.centralized_router_use1 15 | } 16 | two = { 17 | centralized_router = module.centralized_router_use2 18 | } 19 | three = { 20 | centralized_router = module.centralized_router_usw2 21 | } 22 | } 23 | } 24 | 25 | output "full_mesh_trio" { 26 | value = module.full_mesh_trio 27 | } 28 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/providers.tf: -------------------------------------------------------------------------------- 1 | # base region 2 | provider "aws" { 3 | region = "us-east-1" 4 | } 5 | 6 | provider "aws" { 7 | alias = "use1" 8 | region = "us-east-1" 9 | } 10 | 11 | provider "aws" { 12 | alias = "use2" 13 | region = "us-east-2" 14 | } 15 | 16 | provider "aws" { 17 | alias = "usw2" 18 | region = "us-west-2" 19 | } 20 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/security_group_rules.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # allow all ssh and ping communication between all VPCs within each region's intra-vpc security group 3 | intra_vpc_security_group_rules = [ 4 | { 5 | label = "ssh" 6 | protocol = "tcp" 7 | from_port = 22 8 | to_port = 22 9 | }, 10 | { 11 | label = "ping" 12 | protocol = "icmp" 13 | from_port = 8 14 | to_port = 0 15 | } 16 | ] 17 | } 18 | 19 | module "intra_vpc_security_group_rules_use1" { 20 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 21 | version = "1.0.0" 22 | 23 | providers = { 24 | aws = aws.use1 25 | } 26 | 27 | for_each = { for r in local.intra_vpc_security_group_rules : r.label => r } 28 | 29 | env_prefix = var.env_prefix 30 | region_az_labels = var.region_az_labels 31 | intra_vpc_security_group_rule = { 32 | rule = each.value 33 | vpcs = module.vpcs_use1 34 | } 35 | } 36 | 37 | module "intra_vpc_security_group_rules_use2" { 38 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 39 | version = "1.0.0" 40 | 41 | providers = { 42 | aws = aws.use2 43 | } 44 | 45 | for_each = { for r in local.intra_vpc_security_group_rules : r.label => r } 46 | 47 | env_prefix = var.env_prefix 48 | region_az_labels = var.region_az_labels 49 | intra_vpc_security_group_rule = { 50 | rule = each.value 51 | vpcs = module.vpcs_use2 52 | } 53 | } 54 | 55 | module "intra_vpc_security_group_rules_usw2" { 56 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 57 | version = "1.0.0" 58 | 59 | providers = { 60 | aws = aws.usw2 61 | } 62 | 63 | for_each = { for r in local.intra_vpc_security_group_rules : r.label => r } 64 | 65 | env_prefix = var.env_prefix 66 | region_az_labels = var.region_az_labels 67 | intra_vpc_security_group_rule = { 68 | rule = each.value 69 | vpcs = module.vpcs_usw2 70 | } 71 | } 72 | 73 | # allow all ssh and ping communication between all VPCs across regions in each intra-vpc security group 74 | module "full_mesh_intra_vpc_security_group_rules" { 75 | source = "JudeQuintana/full-mesh-intra-vpc-security-group-rules/aws" 76 | version = "1.0.0" 77 | 78 | providers = { 79 | aws.one = aws.use1 80 | aws.two = aws.use2 81 | aws.three = aws.usw2 82 | } 83 | 84 | env_prefix = var.env_prefix 85 | region_az_labels = var.region_az_labels 86 | full_mesh_intra_vpc_security_group_rules = { 87 | one = { 88 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use1 89 | } 90 | two = { 91 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use2 92 | } 93 | three = { 94 | intra_vpc_security_group_rules = 
module.intra_vpc_security_group_rules_usw2 95 | } 96 | } 97 | } 98 | 99 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "region_az_labels" { 8 | description = "Update this map with regions and AZs that will be in use for short name labeling" 9 | type = map(string) 10 | default = { 11 | us-west-2 = "usw2" 12 | us-west-2a = "usw2a" 13 | us-west-2b = "usw2b" 14 | us-west-2c = "usw2c" 15 | us-east-1 = "use1" 16 | us-east-1a = "use1a" 17 | us-east-1b = "use1b" 18 | us-east-1c = "use1c" 19 | us-east-2 = "use2" 20 | us-east-2a = "use2a" 21 | us-east-2b = "use2b" 22 | us-east-2c = "use2c" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpc_endpoint_service_name_fmt = "com.amazonaws.%s.s3" 4 | vpc_endpoint_type = "Gateway" 5 | 6 | vpcs_use1_with_private_route_table_ids = { for this in module.vpcs_use1 : this.name => this if length(this.private_route_table_ids) > 0 } 7 | vpcs_use2_with_private_route_table_ids = { for this in module.vpcs_use2 : this.name => this if length(this.private_route_table_ids) > 0 } 8 | vpcs_usw2_with_private_route_table_ids = { for this in module.vpcs_usw2 : this.name => this if length(this.private_route_table_ids) > 0 } 9 | } 10 | 11 | resource "aws_vpc_endpoint" "s3_use1" { 12 | provider = aws.use1 13 | 14 | for_each = local.vpcs_use1_with_private_route_table_ids 15 | 16 | vpc_id = each.value.id 17 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 18 | vpc_endpoint_type = local.vpc_endpoint_type 19 | route_table_ids = each.value.private_route_table_ids 20 | } 21 | 22 | resource "aws_vpc_endpoint" "s3_use2" { 23 | provider = aws.use2 24 | 25 | for_each = local.vpcs_use2_with_private_route_table_ids 26 | 27 | vpc_id = each.value.id 28 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 29 | vpc_endpoint_type = local.vpc_endpoint_type 30 | route_table_ids = each.value.private_route_table_ids 31 | } 32 | 33 | resource "aws_vpc_endpoint" "s3_usw2" { 34 | provider = aws.usw2 35 | 36 | for_each = local.vpcs_usw2_with_private_route_table_ids 37 | 38 | vpc_id = each.value.id 39 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 40 | vpc_endpoint_type = local.vpc_endpoint_type 41 | route_table_ids = each.value.private_route_table_ids 42 | } 43 | 44 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/vpc_peering.tf: -------------------------------------------------------------------------------- 1 | # cross region peering, only route specific subnets across peering connection 2 | module "vpc_peering_deluxe_use1_general2_to_use2_cicd1" { 3 | source = 
"JudeQuintana/vpc-peering-deluxe/aws" 4 | version = "1.0.0" 5 | 6 | providers = { 7 | aws.local = aws.use1 8 | aws.peer = aws.use2 9 | } 10 | 11 | env_prefix = var.env_prefix 12 | vpc_peering_deluxe = { 13 | local = { 14 | vpc = lookup(module.vpcs_use1, "general2") 15 | # use1 public random1 16 | only_route_subnet_cidrs = ["192.168.13.0/28"] 17 | } 18 | peer = { 19 | vpc = lookup(module.vpcs_use2, "cicd1") 20 | # use2 private jenkins1 21 | only_route_subnet_cidrs = ["172.16.1.0/24"] 22 | } 23 | } 24 | } 25 | 26 | # intra region vpc peering, route all subnets across peering connection 27 | module "vpc_peering_deluxe_usw2_app1_to_usw2_general1" { 28 | source = "JudeQuintana/vpc-peering-deluxe/aws" 29 | version = "1.0.0" 30 | 31 | providers = { 32 | aws.local = aws.usw2 33 | aws.peer = aws.usw2 34 | } 35 | 36 | env_prefix = var.env_prefix 37 | vpc_peering_deluxe = { 38 | local = { 39 | vpc = lookup(module.vpcs_usw2, "app1") 40 | } 41 | peer = { 42 | vpc = lookup(module.vpcs_usw2, "general1") 43 | } 44 | } 45 | } 46 | 47 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/vpcs_use1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_use1 = [ 3 | { 4 | name = "app2" 5 | network_cidr = "10.0.0.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.0.0/24", special = true } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.3.0/28" }, 15 | { name = "haproxy1", cidr = "10.0.4.64/26" } 16 | ] 17 | } 18 | b = { 19 | private_subnets = [ 20 | { name = "cluster2", cidr = "10.0.10.0/24", special = true }, 21 | { name = "random2", cidr = "10.0.11.0/24" } 22 | ] 23 | public_subnets = [ 24 | { name = "random3", cidr = "10.0.12.0/24" } 25 | ] 26 | } 27 | } 28 | }, 29 | { 30 | name = "general2" 31 | network_cidr = "192.168.0.0/20" 32 | azs = { 33 | a = { 34 | private_subnets = [ 35 | { name = "data1", cidr = "192.168.0.0/24" }, 36 | { name = "data2", cidr = "192.168.1.0/24" } 37 | ] 38 | public_subnets = [ 39 | { name = "random4", cidr = "192.168.5.0/28", special = true }, 40 | { name = "haproxy4", cidr = "192.168.6.64/26" } 41 | ] 42 | } 43 | c = { 44 | private_subnets = [ 45 | { name = "experiment1", cidr = "192.168.10.0/24" }, 46 | { name = "experiment2", cidr = "192.168.11.0/24", special = true } 47 | ] 48 | public_subnets = [ 49 | { name = "random1", cidr = "192.168.13.0/28" }, 50 | { name = "haproxy1", cidr = "192.168.14.64/26" } 51 | ] 52 | } 53 | } 54 | } 55 | ] 56 | } 57 | 58 | module "vpcs_use1" { 59 | source = "JudeQuintana/tiered-vpc-ng/aws" 60 | version = "1.0.1" 61 | 62 | providers = { 63 | aws = aws.use1 64 | } 65 | 66 | for_each = { for t in local.tiered_vpcs_use1 : t.name => t } 67 | 68 | env_prefix = var.env_prefix 69 | region_az_labels = var.region_az_labels 70 | tiered_vpc = each.value 71 | } 72 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/vpcs_use2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_use2 = [ 3 | { 4 | name = "cicd1" 5 | network_cidr = "172.16.0.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "jenkins1", cidr = "172.16.1.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` 
attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "172.16.6.0/26", special = true }, 15 | { name = "natgw1", cidr = "172.16.5.0/28" } 16 | ] 17 | } 18 | b = { 19 | private_subnets = [ 20 | { name = "artifacts1", cidr = "172.16.10.0/24", special = true } 21 | ] 22 | } 23 | } 24 | }, 25 | { 26 | name = "infra1" 27 | network_cidr = "172.16.16.0/20" 28 | azs = { 29 | a = { 30 | private_subnets = [ 31 | { name = "artifacts2", cidr = "172.16.22.0/24" } 32 | ] 33 | public_subnets = [ 34 | { name = "random1", cidr = "172.16.23.0/28", special = true } 35 | ] 36 | } 37 | c = { 38 | private_subnets = [ 39 | { name = "jenkins2", cidr = "172.16.16.0/24", special = true } 40 | ] 41 | public_subnets = [ 42 | { name = "random2", cidr = "172.16.19.0/28" } 43 | ] 44 | } 45 | } 46 | } 47 | ] 48 | } 49 | 50 | module "vpcs_use2" { 51 | source = "JudeQuintana/tiered-vpc-ng/aws" 52 | version = "1.0.1" 53 | 54 | providers = { 55 | aws = aws.use2 56 | } 57 | 58 | for_each = { for t in local.tiered_vpcs_use2 : t.name => t } 59 | 60 | env_prefix = var.env_prefix 61 | region_az_labels = var.region_az_labels 62 | tiered_vpc = each.value 63 | } 64 | 65 | -------------------------------------------------------------------------------- /full_mesh_trio_demo/vpcs_usw2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_usw2 = [ 3 | { 4 | name = "app1" 5 | network_cidr = "10.0.16.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.16.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.19.0/28", special = true }, 15 | { name = "haproxy1", cidr = "10.0.21.64/26" } 16 | ] 17 | } 18 | b = { 19 | private_subnets = [ 20 | { name = "cluster2", cidr = "10.0.27.0/24" } 21 | ] 22 | public_subnets = [ 23 | { name = "random2", cidr = "10.0.30.0/28", special = true }, 24 | { name = "haproxy2", cidr = "10.0.31.64/26" } 25 | ] 26 | } 27 | } 28 | }, 29 | { 30 | name = "general1" 31 | network_cidr = "192.168.16.0/20" 32 | azs = { 33 | a = { 34 | private_subnets = [ 35 | { name = "cluster4", cidr = "192.168.21.0/24" } 36 | ] 37 | public_subnets = [ 38 | { name = "random2", cidr = "192.168.22.0/28", special = true }, 39 | { name = "haproxy1", cidr = "192.168.23.64/26" } 40 | ] 41 | } 42 | c = { 43 | private_subnets = [ 44 | { name = "experiment1", cidr = "192.168.16.0/24" } 45 | ] 46 | public_subnets = [ 47 | { name = "random3", cidr = "192.168.19.0/28", special = true }, 48 | { name = "haproxy3", cidr = "192.168.20.64/26" } 49 | ] 50 | } 51 | } 52 | } 53 | ] 54 | } 55 | 56 | module "vpcs_usw2" { 57 | source = "JudeQuintana/tiered-vpc-ng/aws" 58 | version = "1.0.1" 59 | 60 | providers = { 61 | aws = aws.usw2 62 | } 63 | 64 | for_each = { for t in local.tiered_vpcs_usw2 : t.name => t } 65 | 66 | env_prefix = var.env_prefix 67 | region_az_labels = var.region_az_labels 68 | tiered_vpc = each.value 69 | } 70 | 71 | -------------------------------------------------------------------------------- /mega_mesh_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /mega_mesh_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained 
automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.95.0" 6 | constraints = ">= 4.20.0, >= 5.31.0, ~> 5.95" 7 | hashes = [ 8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=", 9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4", 10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743", 11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5", 12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7", 13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b", 14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6", 15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf", 16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82", 19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66", 20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1", 21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0", 22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d", 23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /mega_mesh_demo/README.md: -------------------------------------------------------------------------------- 1 | # Mega Mesh 2 | Mega Mesh == (Full Mesh Trio)² + 1 3 | 4 | [Mega Mesh module](https://github.com/JudeQuintana/terraform-aws-mega-mesh) takes in 10 Centralized Routers and composes a Full Mesh Transit Gateway topology across 10 regions from existing hub spokes in AWS. It peers and generates routes for TGWs and their respective VPCs. 5 | 6 | ![mega-mesh](https://jq1-io.s3.amazonaws.com/mega-mesh/ten-full-mesh-tgw.png) 7 | 8 | --- 9 | - Sometimes I get questions regarding the difference between full mesh TGWs 10 | and CloudWAN. I'm reposting my reply here: 11 | 12 | > I haven't had the chance to work with CloudWAN yet. I've talked to a few people about the differences. 13 | > 14 | > There can only be one routing domain with a full mesh TGW topology because you can only have one peering link between a pair of TGWs, so you can't really segment traffic across regions even when you're separating route tables across TGWs. 15 | > 16 | > CloudWAN allows for multiple routing domains spanning regions via segments. You can choose to share routes between them and it hides the peering, route table, etc. management. The high cost tradeoff is that it makes managing large networks with traffic segmentation easier vs engineers managing a large TGW topology themselves. 17 | > 18 | > That's what I've gathered so far but maybe more knowledgeable people can chime in. Hopefully I'll be able to tinker with CloudWAN in the near future. 19 | 20 | --- 21 | 22 | 1. It begins 23 | - `terraform init` 24 | 25 | 2. Build VPCs (must exist before centralized routers and mega mesh): 26 | - `terraform apply -target module.vpcs_use1 -target module.vpcs_usw1 -target module.vpcs_euc1 -target module.vpcs_euw1 -target module.vpcs_apne1 -target module.vpcs_apse1 -target module.vpcs_cac1 -target module.vpcs_sae1 -target module.vpcs_use2 -target module.vpcs_usw2` 27 | 28 | 3.
Build S3 Gateways 29 | - `terraform apply -target aws_vpc_endpoint.s3_use1 -target aws_vpc_endpoint.s3_usw1 -target aws_vpc_endpoint.s3_euc1 -target aws_vpc_endpoint.s3_euw1 -target aws_vpc_endpoint.s3_apne1 -target aws_vpc_endpoint.s3_apse1 -target aws_vpc_endpoint.s3_cac1 -target aws_vpc_endpoint.s3_sae1 -target aws_vpc_endpoint.s3_use2 -target aws_vpc_endpoint.s3_usw2` 30 | 31 | 4. Build Centralized Routers 32 | - `terraform apply -target module.centralized_router_use1 -target module.centralized_router_usw1 -target module.centralized_router_euc1 -target module.centralized_router_euw1 -target module.centralized_router_apne1 -target module.centralized_router_apse1 -target module.centralized_router_cac1 -target module.centralized_router_sae1 -target module.centralized_router_use2 -target module.centralized_router_usw2` 33 | 34 | 5. Build Mega Mesh 35 | - `terraform apply -target module.mega_mesh` 36 | 37 | Mesh Complete! 38 | 39 | Notes: 40 | - You can combine steps 3 through 5 with a single `terraform apply`. 41 | - Add blackhole cidrs on any centralized router via the 42 | `var.centralized_router.blackhole_cidrs` list to create blackhole routes or aggregate routes. 43 | - Available AZs (a, b, c, etc.) in a region are different per AWS account (i.e. your us-west-2a is not the same AZ as my us-west-2a) so it's possible you'll need to change the AZ letter for a VPC if the provider says it's not available for the region. 44 | 45 | Routing and peering validation with AWS Route Analyzer: 46 | - Go to [AWS Network Manager](https://us-west-2.console.aws.amazon.com/networkmanager/home?region=us-east-1#/networks) (free to use) 47 | - Create global network -> `next` 48 | - UNCHECK `Add core network in your global network` or you will be billed extra -> `next` 49 | - Select new global network -> go to `Transit Gateways` -> `Register 50 | Transit Gateway` -> Select TGWs -> `Register Transit Gateway` -> wait until all states say `Available` 51 | 52 | - Go to `Transit gateway network` -> `Route Analyzer` 53 | - Cross-Region Test 1 54 | - Source: 55 | - Transit Gateway: Choose `TEST-centralized-router-mystique-use1` 56 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app1-use1 <-> TEST-centralized-router-mystique-use1` (VPC) 57 | - IP Address: `10.0.11.4` 58 | - Destination: 59 | - Transit Gateway: Choose `TEST-centralized-router-gambit-apse1` 60 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app6-apse1 <-> TEST-centralized-router-gambit-apse1` (VPC) 61 | - IP Address: `10.0.64.7` 62 | - Select `Run Route Analysis` 63 | - Forward and Return Paths should both have a `Connected` status. 64 | 65 | - Cross-Region Test 2 66 | - Source: 67 | - Transit Gateway: Choose `TEST-centralized-router-gambit-apse1` 68 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app6-apse1 <-> TEST-centralized-router-gambit-apse1` (VPC) 69 | - IP Address: `10.0.70.8` 70 | - Destination: 71 | - Transit Gateway: Choose `TEST-centralized-router-rogue-euw1` 72 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general4-euw1 <-> TEST-centralized-router-rogue-euw1` (VPC) 73 | - IP Address: `192.168.38.6` 74 | - Select `Run Route Analysis` 75 | - Forward and Return Paths should both have a `Connected` status.
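Note: the global network and TGW registration clicks above can also be codified; a minimal Terraform sketch, assuming the centralized router modules expose the TGW ARN via a `transit_gateway` output (a hypothetical attribute name, not confirmed by this demo):
```
# illustrative only: registers two of the ten TGWs into a global network
resource "aws_networkmanager_global_network" "route_analyzer" {
  description = "mega mesh route analyzer playground"
}

resource "aws_networkmanager_transit_gateway_registration" "use1" {
  global_network_id   = aws_networkmanager_global_network.route_analyzer.id
  transit_gateway_arn = module.centralized_router_use1.transit_gateway.arn # hypothetical output
}

resource "aws_networkmanager_transit_gateway_registration" "apse1" {
  global_network_id   = aws_networkmanager_global_network.route_analyzer.id
  transit_gateway_arn = module.centralized_router_apse1.transit_gateway.arn # hypothetical output
}
```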
76 | 77 | - Cross-Region Test 3 78 | - Source: 79 | - Transit Gateway: Choose `TEST-centralized-router-wolverine-sae1` 80 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app8-sae1 <-> TEST-centralized-router-wolverine-sae1` (VPC) 81 | - IP Address: `10.0.128.10` 82 | - Destination: 83 | - Transit Gateway: Choose `TEST-centralized-router-jean-grey-apne1` 84 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app5-apne1 <-> TEST-centralized-router-jean-grey-apne1` (VPC) 85 | - IP Address: `172.16.40.9` 86 | - Select `Run Route Analysis` 87 | - Forward and Return Paths should both have a `Connected` status. 88 | 89 | Several other routes can be validated, try them out! 90 | 91 | Tear down: 92 | - `terraform destroy` (long pause) 93 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_apne1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_apne1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.apne1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "jean-grey" 13 | amazon_side_asn = 64523 14 | vpcs = module.vpcs_apne1 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_apse1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_apse1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.apse1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "gambit" 13 | amazon_side_asn = 64524 14 | vpcs = module.vpcs_apse1 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_cac1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_cac1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.cac1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "beast" 13 | amazon_side_asn = 64525 14 | vpcs = module.vpcs_cac1 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_euc1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_euc1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.euc1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "arch-angel" 13 | amazon_side_asn = 64521 14 | vpcs = module.vpcs_euc1 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_euw1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_euw1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.euw1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "rogue" 13
| amazon_side_asn = 64522 14 | vpcs = module.vpcs_euw1 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_sae1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_sae1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.sae1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "wolverine" 13 | amazon_side_asn = 64526 14 | vpcs = module.vpcs_sae1 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_use1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.use1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "mystique" 13 | amazon_side_asn = 64519 14 | vpcs = module.vpcs_use1 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_use2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_use2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.use2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "apocalypse" 13 | amazon_side_asn = 64527 14 | vpcs = module.vpcs_use2 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_usw1.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_usw1" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.usw1 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "magneto" 13 | amazon_side_asn = 64520 14 | vpcs = module.vpcs_usw1 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /mega_mesh_demo/centralized_router_usw2.tf: -------------------------------------------------------------------------------- 1 | module "centralized_router_usw2" { 2 | source = "JudeQuintana/centralized-router/aws" 3 | version = "1.0.1" 4 | 5 | providers = { 6 | aws = aws.usw2 7 | } 8 | 9 | env_prefix = var.env_prefix 10 | region_az_labels = var.region_az_labels 11 | centralized_router = { 12 | name = "storm" 13 | amazon_side_asn = 64528 14 | vpcs = module.vpcs_usw2 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /mega_mesh_demo/mega_mesh.tf: -------------------------------------------------------------------------------- 1 | module "mega_mesh" { 2 | source = "JudeQuintana/mega-mesh/aws" 3 | version = "1.0.0" 4 | 5 | providers = { 6 | aws.one = aws.use1 7 | aws.two = aws.usw1 8 | aws.three = aws.euc1 9 | aws.four = aws.euw1 10 | aws.five = aws.apne1 11 | aws.six = aws.apse1 12 | aws.seven = aws.cac1 13 | aws.eight = aws.sae1 14 | aws.nine = aws.use2 15 | aws.ten = aws.usw2 16 | } 17 | 18 | env_prefix = var.env_prefix 
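# one through ten below pair each region's centralized router with the
# matching provider alias configured above (use1, usw1, euc1, euw1,
# apne1, apse1, cac1, sae1, use2, usw2)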
19 | mega_mesh = { 20 | one = { 21 | centralized_router = module.centralized_router_use1 22 | } 23 | two = { 24 | centralized_router = module.centralized_router_usw1 25 | } 26 | three = { 27 | centralized_router = module.centralized_router_euc1 28 | } 29 | four = { 30 | centralized_router = module.centralized_router_euw1 31 | } 32 | five = { 33 | centralized_router = module.centralized_router_apne1 34 | } 35 | six = { 36 | centralized_router = module.centralized_router_apse1 37 | } 38 | seven = { 39 | centralized_router = module.centralized_router_cac1 40 | } 41 | eight = { 42 | centralized_router = module.centralized_router_sae1 43 | } 44 | nine = { 45 | centralized_router = module.centralized_router_use2 46 | } 47 | ten = { 48 | centralized_router = module.centralized_router_usw2 49 | } 50 | } 51 | } 52 | 53 | output "mega_mesh" { 54 | value = module.mega_mesh 55 | } 56 | 57 | -------------------------------------------------------------------------------- /mega_mesh_demo/providers.tf: -------------------------------------------------------------------------------- 1 | # base region 2 | provider "aws" { 3 | region = "us-east-1" 4 | } 5 | 6 | provider "aws" { 7 | alias = "use1" 8 | region = "us-east-1" 9 | } 10 | 11 | provider "aws" { 12 | alias = "usw1" 13 | region = "us-west-1" 14 | } 15 | 16 | provider "aws" { 17 | alias = "euc1" 18 | region = "eu-central-1" 19 | } 20 | 21 | provider "aws" { 22 | alias = "euw1" 23 | region = "eu-west-1" 24 | } 25 | 26 | provider "aws" { 27 | alias = "apne1" 28 | region = "ap-northeast-1" 29 | } 30 | 31 | provider "aws" { 32 | alias = "apse1" 33 | region = "ap-southeast-1" 34 | } 35 | 36 | provider "aws" { 37 | alias = "cac1" 38 | region = "ca-central-1" 39 | } 40 | 41 | provider "aws" { 42 | alias = "sae1" 43 | region = "sa-east-1" 44 | } 45 | 46 | provider "aws" { 47 | alias = "use2" 48 | region = "us-east-2" 49 | } 50 | 51 | provider "aws" { 52 | alias = "usw2" 53 | region = "us-west-2" 54 | } 55 | 56 | -------------------------------------------------------------------------------- /mega_mesh_demo/security_group_rules.tf: -------------------------------------------------------------------------------- 1 | # TODO 2 | # mega mesh intra vpc security group rules 3 | -------------------------------------------------------------------------------- /mega_mesh_demo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "region_az_labels" { 8 | description = "Update this map with regions and AZs that will be in use for short name labeling" 9 | type = map(string) 10 | default = { 11 | us-east-1 = "use1" 12 | us-east-1a = "use1a" 13 | us-east-1b = "use1b" 14 | us-east-1c = "use1c" 15 | us-west-1 = "usw1" 16 | us-west-1a = "usw1a" 17 | us-west-1b = "usw1b" 18 | us-west-1c = "usw1c" 19 | eu-central-1 = "euc1" 20 | eu-central-1a = "euc1a" 21 | eu-central-1b = "euc1b" 22 | eu-central-1c = "euc1c" 23 | eu-west-1 = "euw1" 24 | eu-west-1a = "euw1a" 25 | eu-west-1b = "euw1b" 26 | eu-west-1c = "euw1c" 27 | ap-northeast-1 = "apne1" 28 | ap-northeast-1a = "apne1a" 29 | ap-northeast-1b = "apne1b" 30 | ap-northeast-1c = "apne1c" 31 | ap-southeast-1 = "apse1" 32 | ap-southeast-1a = "apse1a" 33 | ap-southeast-1b = "apse1b" 34 | ap-southeast-1c = "apse1c" 35 | ca-central-1 = "cac1" 36 | ca-central-1a = "cac1a" 37 | ca-central-1b = "cac1b" 38 | ca-central-1c = "cac1c" 39 | ca-central-1d = 
"cac1d" 40 | sa-east-1 = "sae1" 41 | sa-east-1a = "sae1a" 42 | sa-east-1b = "sae1b" 43 | sa-east-1c = "sae1c" 44 | us-east-2 = "use1" 45 | us-east-2a = "use1a" 46 | us-east-2b = "use1b" 47 | us-east-2c = "use1c" 48 | us-west-2 = "usw1" 49 | us-west-2a = "usw1a" 50 | us-west-2b = "usw1b" 51 | us-west-2c = "usw1c" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /mega_mesh_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpc_endpoint_service_name_fmt = "com.amazonaws.%s.s3" 4 | vpc_endpoint_type = "Gateway" 5 | 6 | vpcs_apne1_with_private_route_table_ids = { for this in module.vpcs_apne1 : this.name => this if length(this.private_route_table_ids) > 0 } 7 | vpcs_apse1_with_private_route_table_ids = { for this in module.vpcs_apse1 : this.name => this if length(this.private_route_table_ids) > 0 } 8 | vpcs_cac1_with_private_route_table_ids = { for this in module.vpcs_cac1 : this.name => this if length(this.private_route_table_ids) > 0 } 9 | vpcs_euc1_with_private_route_table_ids = { for this in module.vpcs_euc1 : this.name => this if length(this.private_route_table_ids) > 0 } 10 | vpcs_euw1_with_private_route_table_ids = { for this in module.vpcs_euw1 : this.name => this if length(this.private_route_table_ids) > 0 } 11 | vpcs_sae1_with_private_route_table_ids = { for this in module.vpcs_sae1 : this.name => this if length(this.private_route_table_ids) > 0 } 12 | vpcs_use1_with_private_route_table_ids = { for this in module.vpcs_use1 : this.name => this if length(this.private_route_table_ids) > 0 } 13 | vpcs_use2_with_private_route_table_ids = { for this in module.vpcs_use2 : this.name => this if length(this.private_route_table_ids) > 0 } 14 | vpcs_usw1_with_private_route_table_ids = { for this in module.vpcs_usw1 : this.name => this if length(this.private_route_table_ids) > 0 } 15 | vpcs_usw2_with_private_route_table_ids = { for this in module.vpcs_usw2 : this.name => this if length(this.private_route_table_ids) > 0 } 16 | } 17 | 18 | resource "aws_vpc_endpoint" "s3_apne1" { 19 | provider = aws.apne1 20 | 21 | for_each = local.vpcs_apne1_with_private_route_table_ids 22 | 23 | vpc_id = each.value.id 24 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 25 | vpc_endpoint_type = local.vpc_endpoint_type 26 | route_table_ids = each.value.private_route_table_ids 27 | } 28 | 29 | resource "aws_vpc_endpoint" "s3_apse1" { 30 | provider = aws.apse1 31 | 32 | for_each = local.vpcs_apse1_with_private_route_table_ids 33 | 34 | vpc_id = each.value.id 35 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 36 | vpc_endpoint_type = local.vpc_endpoint_type 37 | route_table_ids = each.value.private_route_table_ids 38 | } 39 | 40 | resource "aws_vpc_endpoint" "s3_cac1" { 41 | provider = aws.cac1 42 | 43 | for_each = local.vpcs_cac1_with_private_route_table_ids 44 | 45 | vpc_id = each.value.id 46 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 47 | vpc_endpoint_type = local.vpc_endpoint_type 48 | route_table_ids = 
each.value.private_route_table_ids 49 | } 50 | 51 | resource "aws_vpc_endpoint" "s3_euc1" { 52 | provider = aws.euc1 53 | 54 | for_each = local.vpcs_euc1_with_private_route_table_ids 55 | 56 | vpc_id = each.value.id 57 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 58 | vpc_endpoint_type = local.vpc_endpoint_type 59 | route_table_ids = each.value.private_route_table_ids 60 | } 61 | 62 | resource "aws_vpc_endpoint" "s3_euw1" { 63 | provider = aws.euw1 64 | 65 | for_each = local.vpcs_euw1_with_private_route_table_ids 66 | 67 | vpc_id = each.value.id 68 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 69 | vpc_endpoint_type = local.vpc_endpoint_type 70 | route_table_ids = each.value.private_route_table_ids 71 | } 72 | 73 | resource "aws_vpc_endpoint" "s3_sae1" { 74 | provider = aws.sae1 75 | 76 | for_each = local.vpcs_sae1_with_private_route_table_ids 77 | 78 | vpc_id = each.value.id 79 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 80 | vpc_endpoint_type = local.vpc_endpoint_type 81 | route_table_ids = each.value.private_route_table_ids 82 | } 83 | 84 | resource "aws_vpc_endpoint" "s3_use1" { 85 | provider = aws.use1 86 | 87 | for_each = local.vpcs_use1_with_private_route_table_ids 88 | 89 | vpc_id = each.value.id 90 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 91 | vpc_endpoint_type = local.vpc_endpoint_type 92 | route_table_ids = each.value.private_route_table_ids 93 | } 94 | 95 | resource "aws_vpc_endpoint" "s3_use2" { 96 | provider = aws.use2 97 | 98 | for_each = local.vpcs_use2_with_private_route_table_ids 99 | 100 | vpc_id = each.value.id 101 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 102 | vpc_endpoint_type = local.vpc_endpoint_type 103 | route_table_ids = each.value.private_route_table_ids 104 | } 105 | 106 | resource "aws_vpc_endpoint" "s3_usw1" { 107 | provider = aws.usw1 108 | 109 | for_each = local.vpcs_usw1_with_private_route_table_ids 110 | 111 | vpc_id = each.value.id 112 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 113 | vpc_endpoint_type = local.vpc_endpoint_type 114 | route_table_ids = each.value.private_route_table_ids 115 | } 116 | 117 | resource "aws_vpc_endpoint" "s3_usw2" { 118 | provider = aws.usw2 119 | 120 | for_each = local.vpcs_usw2_with_private_route_table_ids 121 | 122 | vpc_id = each.value.id 123 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 124 | vpc_endpoint_type = local.vpc_endpoint_type 125 | route_table_ids = each.value.private_route_table_ids 126 | } 127 | 128 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_apne1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_apne1 = [ 3 | { 4 | name = "app5" 5 | network_cidr = "172.16.32.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "172.16.32.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "172.16.34.0/28", special = true }, 15 | ] 16 | } 17 | c = { 18 | private_subnets = [ 19 | { name = "cluster2", cidr = "172.16.40.0/24", special = true } 20 | ] 21 | } 22 | } 23 | }, 24 | { 25 | name = "general5" 26 | network_cidr = "172.16.64.0/20" 27 | azs = { 28 | a = { 29 | 
public_subnets = [ 30 | { name = "random2", cidr = "172.16.70.0/28", special = true } 31 | ] 32 | } 33 | c = { 34 | private_subnets = [ 35 | { name = "experiment1", cidr = "172.16.73.0/28", special = true } 36 | ] 37 | public_subnets = [ 38 | { name = "random3", cidr = "172.16.75.0/28" } 39 | ] 40 | } 41 | } 42 | } 43 | ] 44 | } 45 | 46 | module "vpcs_apne1" { 47 | source = "JudeQuintana/tiered-vpc-ng/aws" 48 | version = "1.0.1" 49 | 50 | providers = { 51 | aws = aws.apne1 52 | } 53 | 54 | for_each = { for t in local.tiered_vpcs_apne1 : t.name => t } 55 | 56 | env_prefix = var.env_prefix 57 | region_az_labels = var.region_az_labels 58 | tiered_vpc = each.value 59 | } 60 | 61 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_apse1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_apse1 = [ 3 | { 4 | name = "app6" 5 | network_cidr = "10.0.64.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.64.0/24", special = true } 10 | ] 11 | } 12 | b = { 13 | private_subnets = [ 14 | { name = "cluster2", cidr = "10.0.70.0/24" } 15 | ] 16 | # Enable a NAT Gateway for all private subnets in the same AZ 17 | # by adding the `natgw = true` attribute to any public subnet 18 | public_subnets = [ 19 | { name = "random2", cidr = "10.0.72.0/28", special = true }, 20 | ] 21 | } 22 | } 23 | }, 24 | { 25 | name = "general6" 26 | network_cidr = "192.168.64.0/20" 27 | azs = { 28 | a = { 29 | private_subnets = [ 30 | { name = "cluster4", cidr = "192.168.64.0/24", special = true } 31 | ] 32 | } 33 | c = { 34 | public_subnets = [ 35 | { name = "random3", cidr = "192.168.79.0/28", special = true }, 36 | ] 37 | } 38 | } 39 | } 40 | ] 41 | } 42 | 43 | module "vpcs_apse1" { 44 | source = "JudeQuintana/tiered-vpc-ng/aws" 45 | version = "1.0.1" 46 | 47 | providers = { 48 | aws = aws.apse1 49 | } 50 | 51 | for_each = { for t in local.tiered_vpcs_apse1 : t.name => t } 52 | 53 | env_prefix = var.env_prefix 54 | region_az_labels = var.region_az_labels 55 | tiered_vpc = each.value 56 | } 57 | 58 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_cac1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_cac1 = [ 3 | { 4 | name = "app7" 5 | network_cidr = "10.0.96.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.96.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.102.0/28", special = true }, 15 | { name = "haproxy1", cidr = "10.0.103.64/26" } 16 | ] 17 | } 18 | b = { 19 | private_subnets = [ 20 | { name = "cluster2", cidr = "10.0.108.0/24" } 21 | ] 22 | public_subnets = [ 23 | { name = "random2", cidr = "10.0.110.0/28", special = true }, 24 | { name = "haproxy2", cidr = "10.0.111.64/26" } 25 | ] 26 | } 27 | } 28 | }, 29 | { 30 | name = "general7" 31 | network_cidr = "172.16.96.0/20" 32 | azs = { 33 | a = { 34 | private_subnets = [ 35 | { name = "cluster4", cidr = "172.16.96.0/24" } 36 | ] 37 | public_subnets = [ 38 | { name = "random2", cidr = "172.16.100.0/28", special = true }, 39 | { name = "haproxy1", cidr = "172.16.102.64/26" } 40 | ] 41 | } 42 | d = { 43 | private_subnets = [ 44 | { name = "experiment1", cidr = "172.16.108.0/24" } 45 | ] 46 | public_subnets = [ 
47 | { name = "random3", cidr = "172.16.110.0/28", special = true }, 48 | { name = "haproxy3", cidr = "172.16.111.64/26" } 49 | ] 50 | } 51 | } 52 | } 53 | ] 54 | } 55 | 56 | module "vpcs_cac1" { 57 | source = "JudeQuintana/tiered-vpc-ng/aws" 58 | version = "1.0.1" 59 | 60 | providers = { 61 | aws = aws.cac1 62 | } 63 | 64 | for_each = { for t in local.tiered_vpcs_cac1 : t.name => t } 65 | 66 | env_prefix = var.env_prefix 67 | region_az_labels = var.region_az_labels 68 | tiered_vpc = each.value 69 | } 70 | 71 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_euc1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_euc1 = [ 3 | { 4 | name = "app3" 5 | network_cidr = "10.0.16.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.16.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.19.0/28", special = true }, 15 | { name = "haproxy1", cidr = "10.0.21.64/26" } 16 | ] 17 | } 18 | b = { 19 | private_subnets = [ 20 | { name = "cluster2", cidr = "10.0.27.0/24" } 21 | ] 22 | public_subnets = [ 23 | { name = "random2", cidr = "10.0.30.0/28", special = true }, 24 | { name = "haproxy2", cidr = "10.0.31.64/26" } 25 | ] 26 | } 27 | } 28 | }, 29 | { 30 | name = "general3" 31 | network_cidr = "192.168.16.0/20" 32 | azs = { 33 | a = { 34 | private_subnets = [ 35 | { name = "cluster4", cidr = "192.168.21.0/24" } 36 | ] 37 | public_subnets = [ 38 | { name = "random2", cidr = "192.168.22.0/28", special = true }, 39 | { name = "haproxy1", cidr = "192.168.23.64/26" } 40 | ] 41 | } 42 | c = { 43 | private_subnets = [ 44 | { name = "experiment1", cidr = "192.168.16.0/24" } 45 | ] 46 | public_subnets = [ 47 | { name = "random3", cidr = "192.168.19.0/28", special = true }, 48 | { name = "haproxy3", cidr = "192.168.20.64/26" } 49 | ] 50 | } 51 | } 52 | } 53 | ] 54 | } 55 | 56 | module "vpcs_euc1" { 57 | source = "JudeQuintana/tiered-vpc-ng/aws" 58 | version = "1.0.1" 59 | 60 | providers = { 61 | aws = aws.euc1 62 | } 63 | 64 | for_each = { for t in local.tiered_vpcs_euc1 : t.name => t } 65 | 66 | env_prefix = var.env_prefix 67 | region_az_labels = var.region_az_labels 68 | tiered_vpc = each.value 69 | } 70 | 71 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_euw1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_euw1 = [ 3 | { 4 | name = "app4" 5 | network_cidr = "10.0.32.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.32.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.38.0/28", special = true }, 15 | ] 16 | } 17 | b = { 18 | private_subnets = [ 19 | { name = "cluster2", cidr = "10.0.42.0/24" } 20 | ] 21 | public_subnets = [ 22 | { name = "random2", cidr = "10.0.46.0/28", special = true }, 23 | ] 24 | } 25 | } 26 | }, 27 | { 28 | name = "general4" 29 | network_cidr = "192.168.32.0/20" 30 | azs = { 31 | a = { 32 | private_subnets = [ 33 | { name = "cluster4", cidr = "192.168.32.0/24" } 34 | ] 35 | public_subnets = [ 36 | { name = "random2", cidr = "192.168.35.0/28", special = true }, 37 | ] 
38 | } 39 | c = { 40 | private_subnets = [ 41 | { name = "experiment1", cidr = "192.168.38.0/24" } 42 | ] 43 | public_subnets = [ 44 | { name = "random3", cidr = "192.168.40.0/28", special = true }, 45 | ] 46 | } 47 | } 48 | } 49 | ] 50 | } 51 | 52 | module "vpcs_euw1" { 53 | source = "JudeQuintana/tiered-vpc-ng/aws" 54 | version = "1.0.1" 55 | 56 | providers = { 57 | aws = aws.euw1 58 | } 59 | 60 | for_each = { for t in local.tiered_vpcs_euw1 : t.name => t } 61 | 62 | env_prefix = var.env_prefix 63 | region_az_labels = var.region_az_labels 64 | tiered_vpc = each.value 65 | } 66 | 67 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_sae1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_sae1 = [ 3 | { 4 | name = "app8" 5 | network_cidr = "10.0.128.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.128.0/24", special = true } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.130.0/28" }, 15 | ] 16 | } 17 | c = { 18 | private_subnets = [ 19 | { name = "cluster2", cidr = "10.0.132.0/24", special = true } 20 | ] 21 | } 22 | } 23 | }, 24 | { 25 | name = "general8" 26 | network_cidr = "172.16.160.0/20" 27 | azs = { 28 | a = { 29 | private_subnets = [ 30 | { name = "cluster4", cidr = "172.16.160.0/24" } 31 | ] 32 | public_subnets = [ 33 | { name = "random2", cidr = "172.16.164.0/28", special = true }, 34 | ] 35 | } 36 | c = { 37 | private_subnets = [ 38 | { name = "experiment1", cidr = "172.16.168.0/28" } 39 | ] 40 | public_subnets = [ 41 | { name = "random3", cidr = "172.16.171.0/28", special = true }, 42 | ] 43 | } 44 | } 45 | } 46 | ] 47 | } 48 | 49 | module "vpcs_sae1" { 50 | source = "JudeQuintana/tiered-vpc-ng/aws" 51 | version = "1.0.1" 52 | 53 | providers = { 54 | aws = aws.sae1 55 | } 56 | 57 | for_each = { for t in local.tiered_vpcs_sae1 : t.name => t } 58 | 59 | env_prefix = var.env_prefix 60 | region_az_labels = var.region_az_labels 61 | tiered_vpc = each.value 62 | } 63 | 64 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_use1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_use1 = [ 3 | { 4 | name = "app1" 5 | network_cidr = "10.0.0.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.0.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "10.0.3.0/28", special = true }, 15 | { name = "haproxy1", cidr = "10.0.4.64/26" } 16 | ] 17 | } 18 | b = { 19 | private_subnets = [ 20 | { name = "cluster2", cidr = "10.0.10.0/24" }, 21 | { name = "random2", cidr = "10.0.11.0/24", special = true } 22 | ] 23 | } 24 | } 25 | }, 26 | { 27 | name = "general1" 28 | network_cidr = "192.168.0.0/20" 29 | azs = { 30 | a = { 31 | private_subnets = [ 32 | { name = "data1", cidr = "192.168.0.0/24" }, 33 | { name = "data2", cidr = "192.168.1.0/24", special = true } 34 | ] 35 | } 36 | c = { 37 | private_subnets = [ 38 | { name = "experiment1", cidr = "192.168.10.0/24" }, 39 | { name = "experiment2", cidr = "192.168.11.0/24" } 40 | ] 41 | public_subnets = [ 42 | { name = "random1", cidr = 
"192.168.13.0/28", special = true }, 43 | { name = "haproxy1", cidr = "192.168.14.64/26" } 44 | ] 45 | } 46 | } 47 | } 48 | ] 49 | } 50 | 51 | module "vpcs_use1" { 52 | source = "JudeQuintana/tiered-vpc-ng/aws" 53 | version = "1.0.1" 54 | 55 | providers = { 56 | aws = aws.use1 57 | } 58 | 59 | for_each = { for t in local.tiered_vpcs_use1 : t.name => t } 60 | 61 | env_prefix = var.env_prefix 62 | region_az_labels = var.region_az_labels 63 | tiered_vpc = each.value 64 | } 65 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_use2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_use2 = [ 3 | { 4 | name = "app9" 5 | network_cidr = "192.168.128.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "192.168.128.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "192.168.132.0/24", special = true }, 15 | ] 16 | } 17 | c = { 18 | private_subnets = [ 19 | { name = "cluster2", cidr = "192.168.134.0/24" } 20 | ] 21 | public_subnets = [ 22 | { name = "random2", cidr = "192.168.136.0/28", special = true }, 23 | ] 24 | } 25 | } 26 | }, 27 | { 28 | name = "general9" 29 | network_cidr = "172.16.128.0/20" 30 | azs = { 31 | a = { 32 | private_subnets = [ 33 | { name = "cluster4", cidr = "172.16.128.0/24" } 34 | ] 35 | public_subnets = [ 36 | { name = "random2", cidr = "172.16.132.0/28", special = true }, 37 | ] 38 | } 39 | c = { 40 | private_subnets = [ 41 | { name = "experiment1", cidr = "172.16.135.0/24" } 42 | ] 43 | public_subnets = [ 44 | { name = "random3", cidr = "172.16.136.0/28", special = true }, 45 | ] 46 | } 47 | } 48 | } 49 | ] 50 | } 51 | 52 | module "vpcs_use2" { 53 | source = "JudeQuintana/tiered-vpc-ng/aws" 54 | version = "1.0.1" 55 | 56 | providers = { 57 | aws = aws.use2 58 | } 59 | 60 | for_each = { for t in local.tiered_vpcs_use2 : t.name => t } 61 | 62 | env_prefix = var.env_prefix 63 | region_az_labels = var.region_az_labels 64 | tiered_vpc = each.value 65 | } 66 | 67 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_usw1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_usw1 = [ 3 | { 4 | name = "app2" 5 | network_cidr = "172.16.0.0/20" 6 | azs = { 7 | b = { 8 | private_subnets = [ 9 | { name = "artifacts1", cidr = "172.16.10.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "attachments1", cidr = "172.16.11.0/28", special = true } 15 | ] 16 | } 17 | c = { 18 | private_subnets = [ 19 | { name = "jenkins1", cidr = "172.16.1.0/24" } 20 | ] 21 | public_subnets = [ 22 | { name = "random1", cidr = "172.16.6.0/26" }, 23 | { name = "natgw1", cidr = "172.16.5.0/28", special = true } 24 | ] 25 | } 26 | } 27 | }, 28 | { 29 | name = "general2" 30 | network_cidr = "172.16.16.0/20" 31 | azs = { 32 | b = { 33 | private_subnets = [ 34 | { name = "artifacts2", cidr = "172.16.22.0/24" } 35 | ] 36 | public_subnets = [ 37 | { name = "random1", cidr = "172.16.23.0/28", special = true } 38 | ] 39 | } 40 | c = { 41 | private_subnets = [ 42 | { name = "jenkins2", cidr = "172.16.16.0/24" } 43 | ] 44 | public_subnets = [ 45 | { name = "random2", cidr = 
"172.16.19.0/28", special = true } 46 | ] 47 | } 48 | } 49 | } 50 | ] 51 | } 52 | 53 | module "vpcs_usw1" { 54 | source = "JudeQuintana/tiered-vpc-ng/aws" 55 | version = "1.0.1" 56 | 57 | providers = { 58 | aws = aws.usw1 59 | } 60 | 61 | for_each = { for t in local.tiered_vpcs_usw1 : t.name => t } 62 | 63 | env_prefix = var.env_prefix 64 | region_az_labels = var.region_az_labels 65 | tiered_vpc = each.value 66 | } 67 | 68 | -------------------------------------------------------------------------------- /mega_mesh_demo/vpcs_usw2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_usw2 = [ 3 | { 4 | name = "app10" 5 | network_cidr = "192.168.96.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "192.168.96.0/24" } 10 | ] 11 | # Enable a NAT Gateway for all private subnets in the same AZ 12 | # by adding the `natgw = true` attribute to any public subnet 13 | public_subnets = [ 14 | { name = "random1", cidr = "192.168.100.0/28", special = true }, 15 | ] 16 | } 17 | c = { 18 | private_subnets = [ 19 | { name = "cluster2", cidr = "192.168.103.0/24" } 20 | ] 21 | public_subnets = [ 22 | { name = "random2", cidr = "192.168.106.0/28", special = true }, 23 | ] 24 | } 25 | } 26 | }, 27 | { 28 | name = "general10" 29 | network_cidr = "10.0.160.0/20" 30 | azs = { 31 | a = { 32 | private_subnets = [ 33 | { name = "cluster4", cidr = "10.0.160.0/24" } 34 | ] 35 | public_subnets = [ 36 | { name = "random2", cidr = "10.0.164.0/28", special = true }, 37 | ] 38 | } 39 | c = { 40 | private_subnets = [ 41 | { name = "experiment1", cidr = "10.0.166.0/24" } 42 | ] 43 | public_subnets = [ 44 | { name = "random3", cidr = "10.0.168.0/28", special = true }, 45 | ] 46 | } 47 | } 48 | } 49 | ] 50 | } 51 | 52 | module "vpcs_usw2" { 53 | source = "JudeQuintana/tiered-vpc-ng/aws" 54 | version = "1.0.1" 55 | 56 | providers = { 57 | aws = aws.usw2 58 | } 59 | 60 | for_each = { for t in local.tiered_vpcs_usw2 : t.name => t } 61 | 62 | env_prefix = var.env_prefix 63 | region_az_labels = var.region_az_labels 64 | tiered_vpc = each.value 65 | } 66 | 67 | -------------------------------------------------------------------------------- /networking_trifecta_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /networking_trifecta_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.95.0" 6 | constraints = ">= 4.20.0, >= 5.31.0, ~> 5.95" 7 | hashes = [ 8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=", 9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4", 10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743", 11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5", 12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7", 13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b", 14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6", 15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf", 16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82", 19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66", 20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1", 21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0", 22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d", 23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /networking_trifecta_demo/README.md: -------------------------------------------------------------------------------- 1 | ## Networking Trifecta Demo 2 | Blog Post: 3 | [Terraform Networking Trifecta](https://jq1.io/posts/tnt/) 4 | 5 | All the modules in this project have been [published](https://jq1.io/posts/finally_published_to_public_registry/) to the Terraform Cloud 6 | Public Registry and used in this demo. 7 | 8 | # Goal 9 | Using Terraform (v1.3+) and AWS Provider (v4.20.0+) 10 | to route between 3 VPCs with different IPv4 CIDR ranges (RFC 1918) 11 | using a Transit Gateway AKA a hub spoke topology. 12 | 13 | - App VPC Tier: `10.0.0.0/20` (Class A Private Internet) 14 | - CICD VPC Tier: `172.16.0.0/20` (Class B Private Internet) 15 | - General VPC Tier: `192.168.0.0/20` (Class C Private Internet) 16 | 17 | Example VPC-NG architecture subnets: 18 | ![vpc-ng](https://jq1-io.s3.amazonaws.com/base/aws-vpc.png) 19 | 20 | The resulting architecture is a hub spoke topology (zoom out): 21 | ![tnt](https://jq1-io.s3.amazonaws.com/tnt/tnt.png) 22 | 23 | Modules: 24 | - [Tiered VPC-NG](https://github.com/JudeQuintana/terraform-modules/tree/master/networking/tiered_vpc_ng) 25 | - [Intra VPC Security Group Rule](https://github.com/JudeQuintana/terraform-modules/tree/master/networking/intra_vpc_security_group_rule_for_tiered_vpc_ng) 26 | - [Transit Gateway Centralized Router](https://github.com/JudeQuintana/terraform-modules/tree/master/networking/transit_gateway_centralized_router_for_tiered_vpc_ng) 27 | 28 | Main: 29 | - [Networking Trifecta Demo](https://github.com/JudeQuintana/terraform-main/tree/main/networking_trifecta_demo) 30 | - See [Trifecta Demo Time](https://jq1.io/posts/tnt/#trifecta-demo-time) for instructions. 31 | 32 | # Caveats 33 | The modules build resources that will cost some money, but it should be minimal for the demo (i.e. NATGW, EIP, TGW). 34 | 35 | Even though you can delete subnets in a VPC, remember that the NAT Gateways get created in the public subnets labeled as special for the AZ, and those subnets are also used for VPC attachments when passed to a Centralized Router.
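For example, each AZ in a Tiered VPC-NG definition designates one subnet with `special = true` (used for the TGW VPC attachment), and `natgw = true` on a public subnet enables a NAT Gateway for that AZ's private subnets. A minimal sketch of one AZ, with illustrative names and CIDRs:
```
# snippet: one AZ of a tiered VPC (illustrative)
azs = {
  b = {
    private_subnets = [
      { name = "jenkins1", cidr = "172.16.5.0/24" }
    ]
    public_subnets = [
      # special = true marks the subnet used for the TGW VPC attachment in this AZ
      # natgw = true builds the NAT Gateway for this AZ's private subnets
      { name = "natgw1", cidr = "172.16.4.0/28", special = true, natgw = true }
    ]
  }
}
```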
36 | 37 | No overlapping CIDR detection or validation since the AWS provider will take care of that. 38 | 39 | When modifying an AZ or VPCs in an existing configuration with a TGW Centralized Router: 40 | - Adding an AZ or VPC: 41 | - The VPCs must be applied first. 42 | - Then apply Intra VPC Security Group Rules and TGW Centralized Router. 43 | - Removing: 44 | - An AZ being removed must have its (special) public subnet for the AZ manually removed (modified) from the TGW VPC attachment, then wait until state goes from `Modifying` to `Available` before applying (destroying) the AZ. 45 | - A VPC being removed must have its TGW attachment manually deleted, then wait until state goes from `Deleting` to `Deleted` before applying (destroying) the VPC. 46 | - Then apply Centralized Router to clean up routes in other VPCs that were pointing to the VPC that was deleted. 47 | - Terraform should detect the manually deleted resources for vpc attachment, route table association, route propagation, etc. and remove them from state. 48 | - Then apply Intra VPC Security Group Rule to clean up SG Rules for the deleted VPC. 49 | - Full teardown (destroy) works fine. 50 | 51 | # Trifecta Demo Time 52 | 53 | **This will be a demo of the following:** 54 | - Configure `us-west-2a` and `us-west-2b` AZs in `app` VPC - `10.0.0.0/20` 55 | - Launch `app-public` instance in public subnet. 56 | - Configure `us-west-2b` AZ with NATGW in `cicd` VPC - `172.16.0.0/20` 57 | - Launch `cicd-private` instance in private subnet. 58 | - Configure `us-west-2c` AZ in `general` VPC - `192.168.0.0/20` 59 | - Launch `general-private` instance in private subnet. 60 | - Configure security groups for access across VPCs. 61 | - Allow ssh and ping. 62 | - Configure routing between all public and private subnets across VPCs 63 | via TGW. 64 | - Verify connectivity with `t2.micro` EC2 instances. 65 | - Minimal assembly required. 66 | 67 | **Pre-requisites:** 68 | - git 69 | - curl 70 | - Terraform 1.3.0+ 71 | - Pre-configured AWS credentials 72 | - An AWS EC2 Key Pair should already exist in the `us-west-2` region and the private key should have 73 | user read only permissions. 74 | - private key saved as `~/.ssh/my-ec2-key.pem` on local machine. 75 | - must be user read only permissions `chmod 400 ~/.ssh/my-ec2-key.pem` 76 | 77 | **Assemble the Trifecta** by cloning the [Networking Trifecta Demo](https://github.com/JudeQuintana/terraform-main/) repo. 78 | ``` 79 | $ git clone git@github.com:JudeQuintana/terraform-main.git 80 | $ cd terraform-main/networking_trifecta_demo 81 | ``` 82 | 83 | Update the `var.base_ec2_instance_attributes.key_name` in [variables.tf](https://github.com/JudeQuintana/terraform-main/blob/main/networking_trifecta_demo/variables.tf#L20) with the EC2 key pair name you're using for the `us-west-2` region (see pre-requisites above). 84 | 85 | Note: the AMI used is looked up via a data source 86 | ``` 87 | # snippet 88 | variable "base_ec2_instance_attributes" { 89 | ... 90 | default = { 91 | key_name = "my-ec2-key" # EC2 key pair name to use when launching an instance 92 | instance_type = "t2.micro" 93 | } 94 | } 95 | ``` 96 | 97 | Apply VPCs (must be applied first): 98 | ``` 99 | $ terraform init 100 | $ terraform apply -target module.vpcs 101 | ``` 102 | 103 | Now we'll: 104 | - Build S3 Gateways, security group rules to allow ssh and ping across VPCs. 105 | - Launch instances in each enabled AZ for all VPCs. 106 | - Route between VPCs via TGW.
107 | ``` 108 | $ terraform apply -target aws_vpc_endpoint.s3 -target module.intra_vpc_security_group_rules -target aws_instance.instances -target module.centralized_router 109 | ``` 110 | 111 | Once the apply is complete, it will take 1-2 minutes for the TGW 112 | routing to fully propagate. 113 | 114 | **Verify Connectivity Between VPCs** 115 | ``` 116 | $ chmod u+x ./scripts/get_instance_info.sh 117 | $ ./scripts/get_instance_info.sh 118 | ``` 119 | 120 | Example output: 121 | ``` 122 | # module.vpcs["app"].aws_vpc.this 123 | default_security_group_id = "sg-12345678" 124 | 125 | # aws_instance.instances["app-public"] 126 | private_ip = "10.0.3.200" 127 | public_ip = "54.187.241.115" 128 | 129 | # aws_instance.instances["general-private"] 130 | private_ip = "192.168.10.8" 131 | 132 | # aws_instance.instances["cicd-private"] 133 | private_ip = "172.16.5.11" 134 | 135 | # My Public IP 136 | XX.XX.XX.XX 137 | 138 | # If you have awscli configured follow the instructions below otherwise you have to do it manually in the AWS console 139 | # AWS CLI Command to copy ("q" to exit returned output): 140 | 141 | aws ec2 authorize-security-group-ingress --region us-west-2 --group-id "sg-12345678" --protocol tcp --port 22 --cidr XX.XX.XX.XX/32 142 | ``` 143 | 144 | Run the `awscli` command from the output above to add an inbound ssh rule from "My Public IP" to the default security group id of the App VPC. 145 | 146 | Next, ssh to the `app-public` instance public IP (e.g. `54.187.241.115`) using the EC2 key pair private key. 147 | 148 | Then, ssh to the `private_ip` of the `general-private` instance, then ssh to `cicd-private`, then ssh back to `app-public`. 149 | ``` 150 | $ ssh -i ~/.ssh/my-ec2-key.pem -A ec2-user@54.187.241.115 151 | 152 | [ec2-user@app-public ~]$ ping google.com # works! via igw 153 | [ec2-user@app-public ~]$ ping 192.168.10.8 # works! via tgw 154 | [ec2-user@app-public ~]$ ssh 192.168.10.8 155 | 156 | [ec2-user@general-private ~]$ ping google.com # doesn't work! no natgw 157 | [ec2-user@general-private ~]$ ping 172.16.5.11 # works! via tgw 158 | [ec2-user@general-private ~]$ ssh 172.16.5.11 159 | 160 | [ec2-user@cicd-private ~]$ ping google.com # works! via natgw 161 | [ec2-user@cicd-private ~]$ ping 10.0.3.200 # works! via tgw 162 | [ec2-user@cicd-private ~]$ ssh 10.0.3.200 163 | 164 | [ec2-user@app-public ~]$ 165 | ``` 166 | 167 | 🔻 Trifecta Complete!!! 168 | 169 | **Clean Up** 170 | ``` 171 | $ terraform destroy 172 | ``` 173 | 174 | -------------------------------------------------------------------------------- /networking_trifecta_demo/centralized_router.tf: -------------------------------------------------------------------------------- 1 | # This TGW Centralized Router module will attach all VPCs (attachment for each AZ) to one TGW, 2 | # associate and propagate them to a single route table, 3 | # and generate routes in each VPC to all other networks.
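# Inputs as used in this demo: name labels the TGW, amazon_side_asn sets its
# BGP ASN, blackhole_cidrs are installed as blackhole routes on the TGW route
# table, and vpcs takes the map of Tiered VPC-NG module outputs to attach.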
4 | module "centralized_router" { 5 | source = "JudeQuintana/centralized-router/aws" 6 | version = "1.0.1" 7 | 8 | env_prefix = var.env_prefix 9 | region_az_labels = var.region_az_labels 10 | centralized_router = { 11 | name = "gambit" 12 | amazon_side_asn = 64512 13 | blackhole_cidrs = ["172.16.8.0/24"] 14 | vpcs = module.vpcs 15 | } 16 | } 17 | 18 | output "centralized_router" { 19 | value = module.centralized_router 20 | } 21 | -------------------------------------------------------------------------------- /networking_trifecta_demo/instances.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # create a vpc_name map so I can call vpcs by name and don't have 3 | # to type the name as a string everywhere I need it. 4 | # e.g. local.tiered_vpc_names.app will be "app" 5 | tiered_vpc_names = { for v in module.vpcs : v.name => v.name } 6 | 7 | instances = [ 8 | { 9 | # app-public 10 | name = format("%s-public", local.tiered_vpc_names.app) 11 | # lookup the public subnet id for the 'random1' subnet in the 'a' AZ for the 'app' VPC 12 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.app).public_subnet_name_to_subnet_id, "random1") 13 | vpc_security_group_ids = [ 14 | lookup(module.vpcs, local.tiered_vpc_names.app).default_security_group_id, 15 | lookup(module.vpcs, local.tiered_vpc_names.app).intra_vpc_security_group_id 16 | ] 17 | }, 18 | { 19 | # cicd-private 20 | name = format("%s-private", local.tiered_vpc_names.cicd) 21 | # lookup the private subnet id for the 'jenkins1' subnet in AZ 'b' for the 'cicd' VPC 22 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.cicd).private_subnet_name_to_subnet_id, "jenkins1") 23 | vpc_security_group_ids = [ 24 | lookup(module.vpcs, local.tiered_vpc_names.cicd).default_security_group_id, 25 | lookup(module.vpcs, local.tiered_vpc_names.cicd).intra_vpc_security_group_id 26 | ] 27 | }, 28 | { 29 | # general-private 30 | name = format("%s-private", local.tiered_vpc_names.general) 31 | # lookup the private subnet id for the 'db1' subnet in AZ 'c' for the 'general' VPC 32 | subnet_id = lookup(lookup(module.vpcs, local.tiered_vpc_names.general).private_subnet_name_to_subnet_id, "db1") 33 | vpc_security_group_ids = [ 34 | lookup(module.vpcs, local.tiered_vpc_names.general).default_security_group_id, 35 | lookup(module.vpcs, local.tiered_vpc_names.general).intra_vpc_security_group_id 36 | ] 37 | } 38 | ] 39 | } 40 | 41 | data "aws_ami" "al2023" { 42 | owners = ["amazon"] 43 | most_recent = true 44 | 45 | filter { 46 | name = "name" 47 | values = ["al2023-ami-2023*"] 48 | } 49 | 50 | filter { 51 | name = "architecture" 52 | values = ["x86_64"] 53 | } 54 | } 55 | 56 | # SSH agent forwarding (ssh -A) forwards your key to any host 57 | # so you can easily ssh to each instance, since the instances are 58 | # ssh-key only. 59 | # It's a very insecure configuration and is used just for this demo; 60 | # it shouldn't be used in production.
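# One EC2 instance per entry in local.instances, keyed by name.
# Each instance attaches its VPC's default security group plus the
# intra-vpc security group so the ssh/ping rules apply across VPCs.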
61 | resource "aws_instance" "instances" { 62 | for_each = { for i in local.instances : i.name => i } 63 | 64 | ami = data.aws_ami.al2023.id 65 | instance_type = var.ec2_instances.instance_type 66 | key_name = var.ec2_instances.key_name 67 | subnet_id = each.value.subnet_id 68 | vpc_security_group_ids = each.value.vpc_security_group_ids 69 | user_data = < r } 25 | 26 | env_prefix = var.env_prefix 27 | region_az_labels = var.region_az_labels 28 | intra_vpc_security_group_rule = { 29 | rule = each.value 30 | vpcs = module.vpcs 31 | } 32 | } 33 | 34 | -------------------------------------------------------------------------------- /networking_trifecta_demo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "ec2_instances" { 8 | description = "base attributes for building in us-west-2" 9 | type = object({ 10 | key_name = string 11 | instance_type = string 12 | }) 13 | default = { 14 | key_name = "my-ec2-key" # EC2 key pair name to use when launching an instance in us-west-2 15 | instance_type = "t2.micro" 16 | } 17 | } 18 | 19 | variable "region_az_labels" { 20 | description = "Update this map with regions and AZs that will be in use for short name labeling" 21 | type = map(string) 22 | default = { 23 | us-west-2 = "usw2" 24 | us-west-2a = "usw2a" 25 | us-west-2b = "usw2b" 26 | us-west-2c = "usw2c" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /networking_trifecta_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /networking_trifecta_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpcs_with_private_route_table_ids = { for this in module.vpcs : this.name => this if length(this.private_route_table_ids) > 0 } 4 | } 5 | 6 | resource "aws_vpc_endpoint" "s3" { 7 | for_each = local.vpcs_with_private_route_table_ids 8 | 9 | vpc_id = each.value.id 10 | service_name = format("com.amazonaws.%s.s3", each.value.region) 11 | vpc_endpoint_type = "Gateway" 12 | route_table_ids = each.value.private_route_table_ids 13 | } 14 | 15 | -------------------------------------------------------------------------------- /networking_trifecta_demo/vpcs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs = [ 3 | { 4 | name = "app" 5 | network_cidr = "10.0.0.0/20" 6 | azs = { 7 | a = { 8 | public_subnets = [ 9 | { name = "random1", cidr = "10.0.3.0/28" }, 10 | { name = "haproxy1", cidr = "10.0.4.0/26" }, 11 | { name = "other", cidr = "10.0.10.0/28", special = true } 12 | ] 13 | } 14 | b = { 15 | private_subnets = [ 16 | { name = "cluster2", cidr = "10.0.1.0/24" }, 17 | { name = "random2", cidr = "10.0.5.0/24", special = true } 18 | ] 19 | } 20 | } 21 | }, 22 | { 23 | name = "cicd" 24 | network_cidr = "172.16.0.0/20" 25 | azs = { 26 | b = { 27 | private_subnets = [ 28 | { name = "jenkins1", cidr = "172.16.5.0/24" } 29 | ] 30 | # Enable a NAT Gateway for all private subnets in the same AZ 31 | # by adding the `natgw = true` 
attribute to any public subnet 32 | # `special` and `natgw` can also be enabled together on a public subnet 33 | public_subnets = [ 34 | { name = "other", cidr = "172.16.8.0/28", special = true }, 35 | { name = "natgw", cidr = "172.16.8.16/28", natgw = true } 36 | ] 37 | } 38 | } 39 | }, 40 | { 41 | name = "general" 42 | network_cidr = "192.168.0.0/20" 43 | azs = { 44 | c = { 45 | private_subnets = [ 46 | { name = "db1", cidr = "192.168.10.0/24", special = true } 47 | ] 48 | } 49 | } 50 | } 51 | ] 52 | } 53 | 54 | module "vpcs" { 55 | source = "JudeQuintana/tiered-vpc-ng/aws" 56 | version = "1.0.1" 57 | 58 | for_each = { for t in local.tiered_vpcs : t.name => t } 59 | 60 | env_prefix = var.env_prefix 61 | region_az_labels = var.region_az_labels 62 | tiered_vpc = each.value 63 | } 64 | 65 | -------------------------------------------------------------------------------- /super_router_demo/.terraform-version: -------------------------------------------------------------------------------- 1 | 1.11.4 2 | -------------------------------------------------------------------------------- /super_router_demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.95.0" 6 | constraints = ">= 4.20.0, >= 5.31.0, ~> 5.95" 7 | hashes = [ 8 | "h1:PUug/LLWa4GM08rXqnmCVUXj8ibCTvQxgvawhat3bMo=", 9 | "zh:20aac8c95edd444e659f235d19fa6af9b259c5a70fce19d400539ee88687e7d4", 10 | "zh:29c55846fadd19dde0c5108f74d507c296d6c37cabdd466a96d3721a7c261743", 11 | "zh:325fa5cb42d58c9203c279450863c49e534672f7101c067af465f9d7f4be3be5", 12 | "zh:4f18c643584f7ba554399c0db3dd1c81629dfc2508a8777890f9f3b80b5213b7", 13 | "zh:561e38e9cc6f0be5470c187ea8d51047c4133d9cb74cc1c364a9ebe41f40a06b", 14 | "zh:6ec2cceed96ca5e47591ef11686614c663b05e112a814d24246a2739066577b6", 15 | "zh:710a227c02b8a50f75a82a7f063d2416e85783e02ed91bb22cc12e7a8e11a3cf", 16 | "zh:97a2f5e9bf4cf9a38274eddb7967e1cb4e5b04960c7da3603d9b1c15e18b8626", 17 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 18 | "zh:bf6bfb01fff8226d86c1b219d67cd96f37bb9312b17d00340e6ff00dda2dbe82", 19 | "zh:cba74d606149cbaaa8dfb69f369f2496b851643a879adc24b11515fcece42b66", 20 | "zh:d5a2c36739cab677a48f4856958c96be6f018ff0da50d233ca93a3a21aaceca1", 21 | "zh:df5d1466144852fe5da4af0628db6f02b5186c59f683e5085705d9b90cacfbc0", 22 | "zh:f82d96b45983b3c73b78dced9e344512b7a9adb06e8c1e3e4f422605efbb756d", 23 | "zh:fb523f787077270059a8f3ab52c0fc56257c0b3a06f0219be247c8b15ff0ca2a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /super_router_demo/Readme.md: -------------------------------------------------------------------------------- 1 | # Super Router 2 | Build a decentralized hub-and-spoke topology both intra-region and cross-region. 3 | 4 | Related articles: 5 | - Original Blog Post in [Super Powered, Super Sharp, Super Router!](https://jq1.io/posts/super_router/) 6 | - Fresh new decentralized design in [$init super refactor](https://jq1.io/posts/init_super_refactor/). 7 | - New features mean new steez in [Slappin chrome on the WIP](https://jq1.io/posts/slappin_chrome_on_the_wip/)! 8 | 9 | Demo: 10 | - Pre-requisite: an AWS account; you may need to increase your VPC and/or TGW quotas in 11 | both us-east-1 and us-west-2 depending on how many you currently have.
This demo creates 4 more VPCs in each region (8 total) and 3 TGWs in each region (6 total). 13 | - The [Super Router](https://github.com/JudeQuintana/terraform-modules/tree/master/networking/tgw_super_router_for_tgw_centralized_router) module provides both intra-region and cross-region peering and routing for Centralized Routers and Tiered VPCs (same AWS account only, no cross-account). 14 | 15 | The resulting architecture is a decentralized hub-and-spoke topology: 16 | ![super-router-shokunin](https://jq1-io.s3.amazonaws.com/super-router/super-router-shokunin.png) 17 | 18 | It begins: 19 | - `terraform init` 20 | 21 | Apply Tiered VPCs (must exist before Centralized Routers): 22 | - `terraform apply -target module.vpcs_usw2 -target module.vpcs_another_usw2 -target module.vpcs_use1 -target module.vpcs_another_use1` 23 | 24 | Apply Centralized Routers (must exist before Super Router), Intra VPC Security Group Rules and S3 Gateways: 25 | - `terraform apply -target module.centralized_routers_usw2 -target module.centralized_routers_use1 -target module.intra_vpc_security_group_rules_usw2 -target module.intra_vpc_security_group_rules_use1 -target aws_vpc_endpoint.s3_use1 -target aws_vpc_endpoint.s3_usw2` 26 | 27 | Apply Super Router and Super Intra VPC Security Group Rules: 28 | - `terraform apply -target module.super_router_usw2_to_use1 -target module.super_intra_vpc_security_group_rules_usw2_to_use1` 29 | 30 | The Super Router is now complete! 31 | 32 | Note: If we were using this in Terraform Cloud, it would be best for each of the module applies above to be in its own separate networking workspace with run triggers. For example, if a VPC or AZ is added in its own VPC workspace, apply and trigger the Centralized Router workspace to build routes, then trigger a workspace to add it to the Super Router. 33 | 34 | Routing and peering validation with AWS Route Analyzer: 35 | - Go to [AWS Network Manager](https://us-west-2.console.aws.amazon.com/networkmanager/home?region=us-east-1#/networks) (free to use) 36 | - Create global network -> `next` 37 | - UNCHECK `Add core network in your global network` or you will be billed extra -> `next` 38 | - Select new global network -> go to `Transit Gateways` -> `Register 39 | Transit Gateway` -> Select TGWs -> `Register Transit Gateway` -> wait until all states say `Available` 40 | - Go to `Transit gateway network` -> `Route Analyzer` 41 | - Intra-Region Test 1 (usw2c to usw2a) 42 | - Source: 43 | - Transit Gateway: Choose `TEST-centralized-router-thunderbird-usw2` 44 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general1-usw2 <-> TEST-centralized-router-thunderbird-usw2` (VPC) 45 | - IP Address: `192.168.16.7` (`experiment1` private subnet) 46 | - Destination: 47 | - Transit Gateway: Choose `TEST-centralized-router-storm-usw2` 48 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-cicd1-usw2 <-> TEST-centralized-router-storm-usw2` (VPC) 49 | - IP Address: `172.16.6.9` (`random1` public subnet) 50 | - Select `Run Route Analysis` 51 | - Forward and Return Paths should both have a `Connected` status.
52 | - Intra-Region Test 2 (use1c to use1a) 53 | - Source: 54 | - Transit Gateway: Choose `TEST-centralized-router-bishop-use1` 55 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-infra2-use1 <-> TEST-centralized-router-bishop-use1` (VPC) 56 | - IP Address: `192.168.32.8` (`db1` private subnet) 57 | - Destination: 58 | - Transit Gateway: Choose `TEST-centralized-router-wolverine-use1` 59 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app2-use1 <-> TEST-centralized-router-wolverine-use1` (VPC) 60 | - IP Address: `10.0.0.4` (`cluster1` private subnet) 61 | - Select `Run Route Analysis` 62 | - Forward and Return Paths should both have a `Connected` status. 63 | - Cross-Region Test 1 (usw2a to use1c) 64 | - Source: 65 | - Transit Gateway: Choose `TEST-centralized-router-thunderbird-usw2` 66 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-app1-usw2 <-> TEST-centralized-router-thunderbird-usw2` (VPC) 67 | - IP Address: `10.0.19.5` (`random1` public subnet) 68 | - Destination: 69 | - Transit Gateway: Choose `TEST-centralized-router-wolverine-use1` 70 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-general2-use1 <-> TEST-centralized-router-wolverine-use1` (VPC) 71 | - IP Address: `192.168.11.6` (`experiment2` private subnet) 72 | - Select `Run Route Analysis` 73 | - Forward and Return Paths should both have a `Connected` status. 74 | - Cross-Region Test 2 (use1a to usw2c) 75 | - Source: 76 | - Transit Gateway: Choose `TEST-centralized-router-bishop-use1` 77 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-cicd2-use1 <-> TEST-centralized-router-bishop-use1` (VPC) 78 | - IP Address: `10.0.32.3` (`jenkins1` private subnet) 79 | - Destination: 80 | - Transit Gateway: Choose `TEST-centralized-router-storm-usw2` 81 | - Transit Gateway Attachment: Choose `TEST-tiered-vpc-infra1-usw2 <-> TEST-centralized-router-storm-usw2` (VPC) 82 | - IP Address: `172.16.16.6` (`jenkins2` private subnet) 83 | - Select `Run Route Analysis` 84 | - Forward and Return Paths should both have a `Connected` status. 85 | 86 | Several other routes can be validated; try them out! 87 | 88 | Tear down: 89 | - `terraform destroy` (long delay before the yes/no prompt, be patient) 90 | 91 | -------------------------------------------------------------------------------- /super_router_demo/blackhole_cidrs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # blackhole the app1 usw2b haproxy2 public subnet cidr on all centralized routers and both super router TGWs 3 | blackhole_cidrs = ["10.0.31.64/26"] 4 | } 5 | -------------------------------------------------------------------------------- /super_router_demo/centralized_routers_use1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | centralized_routers_use1 = [ 3 | { 4 | name = "wolverine" 5 | amazon_side_asn = 64519 6 | blackhole_cidrs = local.blackhole_cidrs 7 | vpcs = module.vpcs_use1 8 | }, 9 | { 10 | name = "bishop" 11 | amazon_side_asn = 64524 12 | blackhole_cidrs = local.blackhole_cidrs 13 | vpcs = module.vpcs_another_use1 14 | } 15 | ] 16 | } 17 | 18 | # This TGW Centralized Router module will attach all VPCs (attachment for each AZ) to one TGW, 19 | # associate and propagate them to a single route table, 20 | # and generate routes in each VPC to all other networks.
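# Builds two Centralized Routers (wolverine and bishop), one per VPC group
# above, both pinned to the us-east-1 provider alias via the providers block below.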
21 | module "centralized_routers_use1" { 22 | source = "JudeQuintana/centralized-router/aws" 23 | version = "1.0.1" 24 | 25 | providers = { 26 | aws = aws.use1 27 | } 28 | 29 | for_each = { for c in local.centralized_routers_use1 : c.name => c } 30 | 31 | env_prefix = var.env_prefix 32 | region_az_labels = var.region_az_labels 33 | centralized_router = each.value 34 | } 35 | -------------------------------------------------------------------------------- /super_router_demo/centralized_routers_usw2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | centralized_routers_usw2 = [ 3 | { 4 | name = "thunderbird" 5 | amazon_side_asn = 64520 6 | blackhole_cidrs = local.blackhole_cidrs 7 | vpcs = module.vpcs_usw2 8 | }, 9 | { 10 | name = "storm" 11 | amazon_side_asn = 64525 12 | blackhole_cidrs = local.blackhole_cidrs 13 | vpcs = module.vpcs_another_usw2 14 | } 15 | ] 16 | } 17 | 18 | module "centralized_routers_usw2" { 19 | source = "JudeQuintana/centralized-router/aws" 20 | version = "1.0.1" 21 | 22 | providers = { 23 | aws = aws.usw2 24 | } 25 | 26 | for_each = { for c in local.centralized_routers_usw2 : c.name => c } 27 | 28 | env_prefix = var.env_prefix 29 | region_az_labels = var.region_az_labels 30 | centralized_router = each.value 31 | } 32 | -------------------------------------------------------------------------------- /super_router_demo/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "us-west-2" 3 | } 4 | 5 | provider "aws" { 6 | alias = "usw2" 7 | region = "us-west-2" 8 | } 9 | 10 | provider "aws" { 11 | alias = "use1" 12 | region = "us-east-1" 13 | } 14 | -------------------------------------------------------------------------------- /super_router_demo/security_group_rules.tf: -------------------------------------------------------------------------------- 1 | # This will create a sg rule for each vpc allowing inbound-only ports from 2 | # all other vpc networks (excluding itself) 3 | # Basically allowing ssh and ping communication between all VPCs within each region 4 | locals { 5 | intra_vpc_security_group_rules = [ 6 | { 7 | label = "ssh" 8 | protocol = "tcp" 9 | from_port = 22 10 | to_port = 22 11 | }, 12 | { 13 | label = "ping" 14 | protocol = "icmp" 15 | from_port = 8 16 | to_port = 0 17 | } 18 | ] 19 | } 20 | 21 | module "intra_vpc_security_group_rules_usw2" { 22 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 23 | version = "1.0.0" 24 | 25 | providers = { 26 | aws = aws.usw2 27 | } 28 | 29 | for_each = { for r in local.intra_vpc_security_group_rules : r.label => r } 30 | 31 | env_prefix = var.env_prefix 32 | region_az_labels = var.region_az_labels 33 | intra_vpc_security_group_rule = { 34 | rule = each.value 35 | vpcs = merge(module.vpcs_usw2, module.vpcs_another_usw2) 36 | } 37 | } 38 | 39 | module "intra_vpc_security_group_rules_use1" { 40 | source = "JudeQuintana/intra-vpc-security-group-rule/aws" 41 | version = "1.0.0" 42 | 43 | providers = { 44 | aws = aws.use1 45 | } 46 | 47 | for_each = { for r in local.intra_vpc_security_group_rules : r.label => r } 48 | 49 | env_prefix = var.env_prefix 50 | region_az_labels = var.region_az_labels 51 | intra_vpc_security_group_rule = { 52 | rule = each.value 53 | vpcs = merge(module.vpcs_use1, module.vpcs_another_use1) 54 | } 55 | } 56 | 57 | # allowing ssh and ping communication across regions 58 | module "super_intra_vpc_security_group_rules_usw2_to_use1" { 59 | source = 
"JudeQuintana/super-intra-vpc-security-group-rules/aws" 60 | version = "1.0.0" 61 | 62 | providers = { 63 | aws.local = aws.usw2 64 | aws.peer = aws.use1 65 | } 66 | 67 | env_prefix = var.env_prefix 68 | region_az_labels = var.region_az_labels 69 | super_intra_vpc_security_group_rules = { 70 | local = { 71 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_usw2 72 | } 73 | peer = { 74 | intra_vpc_security_group_rules = module.intra_vpc_security_group_rules_use1 75 | } 76 | } 77 | } 78 | 79 | -------------------------------------------------------------------------------- /super_router_demo/super_router_usw2_to_use1.tf: -------------------------------------------------------------------------------- 1 | # Super Router is composed of two TGWs, one in each region. 2 | module "super_router_usw2_to_use1" { 3 | source = "JudeQuintana/super-router/aws" 4 | version = "1.0.0" 5 | 6 | providers = { 7 | aws.local = aws.usw2 # local super router tgw will be built in the aws.local provider region 8 | aws.peer = aws.use1 # peer super router tgw will be built in the aws.peer provider region 9 | } 10 | 11 | env_prefix = var.env_prefix 12 | region_az_labels = var.region_az_labels 13 | super_router = { 14 | name = "professor-x" 15 | blackhole_cidrs = local.blackhole_cidrs 16 | local = { 17 | amazon_side_asn = 64521 18 | centralized_routers = module.centralized_routers_usw2 19 | } 20 | peer = { 21 | amazon_side_asn = 64522 22 | centralized_routers = module.centralized_routers_use1 23 | } 24 | } 25 | } 26 | 27 | output "super_router" { 28 | value = module.super_router_usw2_to_use1 29 | } 30 | -------------------------------------------------------------------------------- /super_router_demo/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env_prefix" { 2 | description = "environment prefix ie test, stg, prod" 3 | type = string 4 | default = "test" 5 | } 6 | 7 | variable "region_az_labels" { 8 | description = "Update this map with regions and AZs that will be in use for short name labeling" 9 | type = map(string) 10 | default = { 11 | us-east-1 = "use1" 12 | us-east-1a = "use1a" 13 | us-east-1b = "use1b" 14 | us-east-1c = "use1c" 15 | us-west-2 = "usw2" 16 | us-west-2a = "usw2a" 17 | us-west-2b = "usw2b" 18 | us-west-2c = "usw2c" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /super_router_demo/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~>1.4" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~>5.95" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /super_router_demo/vpc_endpoints.tf: -------------------------------------------------------------------------------- 1 | # at scale we're saving money right here 2 | locals { 3 | vpc_endpoint_service_name_fmt = "com.amazonaws.%s.s3" 4 | vpc_endpoint_type = "Gateway" 5 | 6 | vpcs_use1_with_private_route_table_ids = { for this in merge(module.vpcs_use1, module.vpcs_another_use1) : this.name => this if length(this.private_route_table_ids) > 0 } 7 | vpcs_usw2_with_private_route_table_ids = { for this in merge(module.vpcs_usw2, module.vpcs_another_usw2) : this.name => this if length(this.private_route_table_ids) > 0 } 8 | } 9 | 10 | resource "aws_vpc_endpoint" "s3_use1" { 11 | provider = aws.use1 12 | 13 | for_each = local.vpcs_use1_with_private_route_table_ids 14 
| 15 | vpc_id = each.value.id 16 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 17 | vpc_endpoint_type = local.vpc_endpoint_type 18 | route_table_ids = each.value.private_route_table_ids 19 | } 20 | 21 | resource "aws_vpc_endpoint" "s3_usw2" { 22 | provider = aws.usw2 23 | 24 | for_each = local.vpcs_usw2_with_private_route_table_ids 25 | 26 | vpc_id = each.value.id 27 | service_name = format(local.vpc_endpoint_service_name_fmt, each.value.region) 28 | vpc_endpoint_type = local.vpc_endpoint_type 29 | route_table_ids = each.value.private_route_table_ids 30 | } 31 | 32 | -------------------------------------------------------------------------------- /super_router_demo/vpcs_use1.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_use1 = [ 3 | { 4 | name = "app2" 5 | network_cidr = "10.0.0.0/20" 6 | azs = { 7 | a = { 8 | private_subnets = [ 9 | { name = "cluster1", cidr = "10.0.0.0/24", special = true } 10 | ] 11 | } 12 | b = { 13 | private_subnets = [ 14 | { name = "cluster2", cidr = "10.0.10.0/24" }, 15 | { name = "random2", cidr = "10.0.11.0/24" } 16 | ] 17 | # Enable a NAT Gateway for all private subnets in the same AZ 18 | # by adding the `natgw = true` attribute to any public subnet 19 | public_subnets = [ 20 | { name = "random3", cidr = "10.0.12.0/24", special = true } 21 | ] 22 | } 23 | } 24 | }, 25 | { 26 | name = "general2" 27 | network_cidr = "192.168.0.0/20" 28 | azs = { 29 | c = { 30 | private_subnets = [ 31 | { name = "experiment1", cidr = "192.168.10.0/24", special = true }, 32 | { name = "experiment2", cidr = "192.168.11.0/24" } 33 | ] 34 | } 35 | } 36 | } 37 | ] 38 | } 39 | 40 | module "vpcs_use1" { 41 | source = "JudeQuintana/tiered-vpc-ng/aws" 42 | version = "1.0.1" 43 | 44 | providers = { 45 | aws = aws.use1 46 | } 47 | 48 | for_each = { for t in local.tiered_vpcs_use1 : t.name => t } 49 | 50 | env_prefix = var.env_prefix 51 | region_az_labels = var.region_az_labels 52 | tiered_vpc = each.value 53 | } 54 | 55 | 56 | # Another 57 | locals { 58 | tiered_vpcs_another_use1 = [ 59 | { 60 | name = "cicd2" 61 | network_cidr = "10.0.32.0/20" 62 | azs = { 63 | a = { 64 | private_subnets = [ 65 | { name = "jenkins1", cidr = "10.0.32.0/24", special = true } 66 | ] 67 | } 68 | } 69 | }, 70 | { 71 | name = "infra2" 72 | network_cidr = "192.168.32.0/20" 73 | azs = { 74 | c = { 75 | private_subnets = [ 76 | { name = "db1", cidr = "192.168.32.0/24", special = true } 77 | ] 78 | } 79 | } 80 | } 81 | ] 82 | } 83 | 84 | module "vpcs_another_use1" { 85 | source = "JudeQuintana/tiered-vpc-ng/aws" 86 | version = "1.0.1" 87 | 88 | providers = { 89 | aws = aws.use1 90 | } 91 | 92 | for_each = { for t in local.tiered_vpcs_another_use1 : t.name => t } 93 | 94 | env_prefix = var.env_prefix 95 | region_az_labels = var.region_az_labels 96 | tiered_vpc = each.value 97 | } 98 | 99 | -------------------------------------------------------------------------------- /super_router_demo/vpcs_usw2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tiered_vpcs_usw2 = [ 3 | { 4 | name = "app1" 5 | network_cidr = "10.0.16.0/20" 6 | azs = { 7 | a = { 8 | public_subnets = [ 9 | { name = "random1", cidr = "10.0.19.0/28", special = true }, 10 | { name = "haproxy1", cidr = "10.0.21.64/26" } 11 | ] 12 | } 13 | b = { 14 | private_subnets = [ 15 | { name = "cluster2", cidr = "10.0.27.0/24" } 16 | ] 17 | # Enable a NAT Gateway for all private subnets in the same AZ 18 | # by 
adding the `natgw = true` attribute to any public subnet 19 | public_subnets = [ 20 | { name = "random2", cidr = "10.0.30.0/28", special = true }, 21 | { name = "haproxy2", cidr = "10.0.31.64/26" } 22 | ] 23 | } 24 | } 25 | }, 26 | { 27 | name = "general1" 28 | network_cidr = "192.168.16.0/20" 29 | azs = { 30 | c = { 31 | private_subnets = [ 32 | { name = "experiment1", cidr = "192.168.16.0/24", special = true } 33 | ] 34 | } 35 | } 36 | } 37 | ] 38 | } 39 | 40 | module "vpcs_usw2" { 41 | source = "JudeQuintana/tiered-vpc-ng/aws" 42 | version = "1.0.1" 43 | 44 | providers = { 45 | aws = aws.usw2 46 | } 47 | 48 | for_each = { for t in local.tiered_vpcs_usw2 : t.name => t } 49 | 50 | env_prefix = var.env_prefix 51 | region_az_labels = var.region_az_labels 52 | tiered_vpc = each.value 53 | } 54 | 55 | # Another 56 | locals { 57 | tiered_vpcs_another_usw2 = [ 58 | { 59 | name = "cicd1" 60 | network_cidr = "172.16.0.0/20" 61 | azs = { 62 | a = { 63 | private_subnets = [ 64 | { name = "jenkins1", cidr = "172.16.1.0/24" } 65 | ] 66 | public_subnets = [ 67 | { name = "random1", cidr = "172.16.6.0/26" }, 68 | { name = "various", cidr = "172.16.5.0/28", special = true } 69 | ] 70 | } 71 | } 72 | }, 73 | { 74 | name = "infra1" 75 | network_cidr = "172.16.16.0/20" 76 | azs = { 77 | c = { 78 | private_subnets = [ 79 | { name = "jenkins2", cidr = "172.16.16.0/24", special = true } 80 | ] 81 | public_subnets = [ 82 | { name = "random1", cidr = "172.16.19.0/28" } 83 | ] 84 | } 85 | } 86 | } 87 | ] 88 | } 89 | 90 | module "vpcs_another_usw2" { 91 | source = "JudeQuintana/tiered-vpc-ng/aws" 92 | version = "1.0.1" 93 | 94 | providers = { 95 | aws = aws.usw2 96 | } 97 | 98 | for_each = { for t in local.tiered_vpcs_another_usw2 : t.name => t } 99 | 100 | env_prefix = var.env_prefix 101 | region_az_labels = var.region_az_labels 102 | tiered_vpc = each.value 103 | } 104 | 105 | --------------------------------------------------------------------------------