├── README.md ├── alb.tf ├── ec2.tf ├── helper_scripts ├── ansible-hosts.template.txt ├── sysprep-bastion.sh └── sysprep-openshift.sh ├── inventory.tf ├── main.tf ├── output.tf ├── security_group.tf ├── variables.tf └── vpc.tf /README.md: -------------------------------------------------------------------------------- 1 | # openshift-terraform 2 | 3 | For more information have a look at my blog post [Deploying OpenShift 3.9 Container Platform using Terraform and Ansible on Amazon AWS](https://techbloc.net/archives/3222) and [Deploying OpenShift 3.11 Container Platform using Terraform on Amazon AWS](https://techbloc.net/archives/3428) 4 | -------------------------------------------------------------------------------- /alb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "master_alb" { 2 | name = "master" 3 | internal = false 4 | load_balancer_type = "network" 5 | subnets = ["${aws_subnet.PublicSubnetA.id}","${aws_subnet.PublicSubnetB.id}","${aws_subnet.PublicSubnetC.id}"] 6 | enable_cross_zone_load_balancing = true 7 | tags { 8 | Name = "master_alb" 9 | } 10 | } 11 | resource "aws_lb" "infra_alb" { 12 | name = "infra" 13 | subnets = ["${aws_subnet.PublicSubnetA.id}","${aws_subnet.PublicSubnetB.id}","${aws_subnet.PublicSubnetC.id}"] 14 | security_groups = ["${aws_security_group.sec_infra_alb.id}"] 15 | internal = false 16 | idle_timeout = 60 17 | tags { 18 | Name = "infra_alb" 19 | } 20 | } 21 | resource "aws_lb_target_group" "group_master_alb" { 22 | name = "master-alb-target-group" 23 | port = "8443" 24 | protocol = "TCP" 25 | vpc_id = "${aws_vpc.default.id}" 26 | tags { 27 | name = "group_master_alb" 28 | } 29 | } 30 | resource "aws_lb_target_group" "group_infra_alb" { 31 | name = "infra-alb-target-group" 32 | port = "80" 33 | protocol = "HTTP" 34 | vpc_id = "${aws_vpc.default.id}" 35 | tags { 36 | name = "group_infra_alb" 37 | } 38 | stickiness { 39 | type = "lb_cookie" 40 | cookie_duration = 1800 41 | enabled = true 42 | } 43 | health_check { 44 | healthy_threshold = 3 45 | unhealthy_threshold = 10 46 | timeout = 5 47 | interval = 10 48 | path = "/" 49 | port = 80 50 | } 51 | } 52 | resource "aws_lb_listener" "listener_master_alb" { 53 | load_balancer_arn = "${aws_lb.master_alb.arn}" 54 | port = 8443 55 | protocol = "TCP" 56 | default_action { 57 | target_group_arn = "${aws_lb_target_group.group_master_alb.arn}" 58 | type = "forward" 59 | } 60 | } 61 | resource "aws_lb_listener" "listener_infra_alb" { 62 | load_balancer_arn = "${aws_lb.infra_alb.arn}" 63 | port = 80 64 | protocol = "HTTP" 65 | default_action { 66 | target_group_arn = "${aws_lb_target_group.group_infra_alb.arn}" 67 | type = "forward" 68 | } 69 | } 70 | resource "aws_lb_target_group_attachment" "attachment_master1_alb" { 71 | target_group_arn = "${aws_lb_target_group.group_master_alb.arn}" 72 | target_id = "${aws_instance.master1.id}" 73 | port = 8443 74 | } 75 | resource "aws_lb_target_group_attachment" "attachment_master2_alb" { 76 | target_group_arn = "${aws_lb_target_group.group_master_alb.arn}" 77 | target_id = "${aws_instance.master2.id}" 78 | port = 8443 79 | } 80 | resource "aws_lb_target_group_attachment" "attachment_master3_alb" { 81 | target_group_arn = "${aws_lb_target_group.group_master_alb.arn}" 82 | target_id = "${aws_instance.master3.id}" 83 | port = 8443 84 | } 85 | resource "aws_lb_target_group_attachment" "attachment_infra1_alb" { 86 | target_group_arn = "${aws_lb_target_group.group_infra_alb.arn}" 87 | target_id = "${aws_instance.infra1.id}" 88 | port = 80 89 | } 90 | resource "aws_lb_target_group_attachment" "attachment_infra2_alb" { 91 | target_group_arn = "${aws_lb_target_group.group_infra_alb.arn}" 92 | target_id = "${aws_instance.infra2.id}" 93 | port = 80 94 | } 95 | resource "aws_lb_target_group_attachment" "attachment_infra3_alb" { 96 | target_group_arn = "${aws_lb_target_group.group_infra_alb.arn}" 97 | target_id = "${aws_instance.infra3.id}" 98 | port = 80 99 | } 100 | -------------------------------------------------------------------------------- /ec2.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "bastion" { 2 | key_name = "${var.bastion_key_name}" 3 | public_key = "${file(var.bastion_key_path)}" 4 | } 5 | resource "aws_instance" "bastion" { 6 | ami = "${lookup(var.aws_amis, var.aws_region)}" 7 | instance_type = "t2.micro" 8 | subnet_id = "${aws_subnet.PublicSubnetA.id}" 9 | security_groups = [ 10 | "${aws_security_group.sec_bastion.id}", 11 | ] 12 | associate_public_ip_address = true 13 | key_name = "${aws_key_pair.bastion.id}" 14 | user_data = "${data.template_file.sysprep-bastion.rendered}" 15 | tags { 16 | Name = "Bastion" 17 | } 18 | } 19 | resource "aws_key_pair" "openshift" { 20 | key_name = "${var.openshift_key_name}" 21 | public_key = "${file(var.openshift_key_path)}" 22 | } 23 | resource "aws_instance" "master1" { 24 | ami = "${lookup(var.aws_amis, var.aws_region)}" 25 | instance_type = "t2.large" 26 | subnet_id = "${aws_subnet.PrivateSubnetA.id}" 27 | security_groups = [ 28 | "${aws_security_group.sec_openshift.id}", 29 | ] 30 | key_name = "${aws_key_pair.openshift.id}" 31 | user_data = "${data.template_file.sysprep-openshift.rendered}" 32 | tags { 33 | Name = "Master-1" 34 | } 35 | } 36 | resource "aws_instance" "master2" { 37 | ami = "${lookup(var.aws_amis, var.aws_region)}" 38 | instance_type = "t2.large" 39 | subnet_id = "${aws_subnet.PrivateSubnetB.id}" 40 | security_groups = [ 41 | "${aws_security_group.sec_openshift.id}", 42 | ] 43 | key_name = "${aws_key_pair.openshift.id}" 44 | user_data = "${data.template_file.sysprep-openshift.rendered}" 45 | tags { 46 | Name = "Master-2" 47 | } 48 | } 49 | resource "aws_instance" "master3" { 50 | ami = "${lookup(var.aws_amis, var.aws_region)}" 51 | instance_type = "t2.large" 52 | subnet_id = "${aws_subnet.PrivateSubnetC.id}" 53 | security_groups = [ 54 | "${aws_security_group.sec_openshift.id}", 55 | ] 56 | key_name = "${aws_key_pair.openshift.id}" 57 | user_data = "${data.template_file.sysprep-openshift.rendered}" 58 | tags { 59 | Name = "Master-3" 60 | } 61 | } 62 | resource "aws_instance" "worker1" { 63 | ami = "${lookup(var.aws_amis, var.aws_region)}" 64 | instance_type = "t2.large" 65 | subnet_id = "${aws_subnet.PrivateSubnetA.id}" 66 | security_groups = [ 67 | "${aws_security_group.sec_openshift.id}", 68 | ] 69 | key_name = "${aws_key_pair.openshift.id}" 70 | user_data = "${data.template_file.sysprep-openshift.rendered}" 71 | tags { 72 | Name = "Worker-1" 73 | } 74 | } 75 | resource "aws_instance" "worker2" { 76 | ami = "${lookup(var.aws_amis, var.aws_region)}" 77 | instance_type = "t2.large" 78 | subnet_id = "${aws_subnet.PrivateSubnetB.id}" 79 | security_groups = [ 80 | "${aws_security_group.sec_openshift.id}", 81 | ] 82 | key_name = "${aws_key_pair.openshift.id}" 83 | user_data = "${data.template_file.sysprep-openshift.rendered}" 84 | tags { 85 | Name = "Worker-2" 86 | } 87 | } 88 | resource "aws_instance" "worker3" { 89 | ami = "${lookup(var.aws_amis, var.aws_region)}" 90 | instance_type = "t2.large" 91 | subnet_id = "${aws_subnet.PrivateSubnetC.id}" 92 | security_groups = [ 93 | "${aws_security_group.sec_openshift.id}", 94 | ] 95 | key_name = "${aws_key_pair.openshift.id}" 96 | user_data = "${data.template_file.sysprep-openshift.rendered}" 97 | tags { 98 | Name = "Worker-3" 99 | } 100 | } 101 | resource "aws_instance" "infra1" { 102 | ami = "${lookup(var.aws_amis, var.aws_region)}" 103 | instance_type = "t2.large" 104 | subnet_id = "${aws_subnet.PrivateSubnetA.id}" 105 | security_groups = [ 106 | "${aws_security_group.sec_openshift.id}", 107 | ] 108 | key_name = "${aws_key_pair.openshift.id}" 109 | user_data = "${data.template_file.sysprep-openshift.rendered}" 110 | tags { 111 | Name = "Infra-1" 112 | } 113 | } 114 | resource "aws_instance" "infra2" { 115 | ami = "${lookup(var.aws_amis, var.aws_region)}" 116 | instance_type = "t2.large" 117 | subnet_id = "${aws_subnet.PrivateSubnetB.id}" 118 | security_groups = [ 119 | "${aws_security_group.sec_openshift.id}", 120 | ] 121 | key_name = "${aws_key_pair.openshift.id}" 122 | user_data = "${data.template_file.sysprep-openshift.rendered}" 123 | tags { 124 | Name = "Infra-2" 125 | } 126 | } 127 | resource "aws_instance" "infra3" { 128 | ami = "${lookup(var.aws_amis, var.aws_region)}" 129 | instance_type = "t2.large" 130 | subnet_id = "${aws_subnet.PrivateSubnetC.id}" 131 | security_groups = [ 132 | "${aws_security_group.sec_openshift.id}", 133 | ] 134 | key_name = "${aws_key_pair.openshift.id}" 135 | user_data = "${data.template_file.sysprep-openshift.rendered}" 136 | tags { 137 | Name = "Infra-3" 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /helper_scripts/ansible-hosts.template.txt: -------------------------------------------------------------------------------- 1 | 2 | [OSEv3:children] 3 | masters 4 | etcd 5 | nodes 6 | 7 | [OSEv3:vars] 8 | ansible_ssh_user=centos 9 | ansible_become=true 10 | 11 | openshift_metrics_install_metrics=false 12 | openshift_metrics_start_cluster=false 13 | 14 | openshift_deployment_type=origin 15 | openshift_install_examples=false 16 | 17 | openshift_release=v3.9 18 | 19 | openshift_check_min_host_memory_gb=8 20 | openshift_disable_check=docker_image_availability,docker_storage,disk_availability 21 | 22 | containerized=true 23 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 24 | openshift_master_htpasswd_users={'demo': '$apr1$.MaA77kd$Rlnn6RXq9kCjnEfh5I3w/.'} 25 | 26 | openshift_hosted_router_selector='region=infra' 27 | openshift_master_default_subdomain=${public_subdomain} 28 | 29 | openshift_hosted_registry_selector='region=infra' 30 | 31 | osm_default_node_selector='region=primary' 32 | 33 | [masters] 34 | ${master1_hostname} 35 | ${master2_hostname} 36 | ${master3_hostname} 37 | 38 | [etcd] 39 | ${master1_hostname} 40 | ${master2_hostname} 41 | ${master3_hostname} 42 | 43 | [nodes] 44 | ${master1_hostname} openshift_hostname=${master1_hostname} openshift_public_hostname=${admin_hostname} 45 | ${master2_hostname} openshift_hostname=${master2_hostname} openshift_public_hostname=${admin_hostname} 46 | ${master3_hostname} openshift_hostname=${master3_hostname} openshift_public_hostname=${admin_hostname} 47 | ${infra1_hostname} openshift_hostname=${infra1_hostname} openshift_node_labels="{'region': 'infra', 'zone': 'default'}" 48 | ${infra2_hostname} openshift_hostname=${infra2_hostname} openshift_node_labels="{'region': 'infra', 'zone': 'default'}" 49 | ${infra3_hostname} openshift_hostname=${infra3_hostname} openshift_node_labels="{'region': 'infra', 'zone': 'default'}" 50 | ${worker1_hostname} openshift_hostname=${worker1_hostname} openshift_schedulable=true openshift_node_labels="{'region': 'primary', 'zone': 'default'}" 51 | ${worker2_hostname} openshift_hostname=${worker2_hostname} openshift_schedulable=true openshift_node_labels="{'region': 'primary', 'zone': 'default'}" 52 | ${worker3_hostname} openshift_hostname=${worker3_hostname} openshift_schedulable=true openshift_node_labels="{'region': 'primary', 'zone': 'default'}" 53 | -------------------------------------------------------------------------------- /helper_scripts/sysprep-bastion.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | sudo yum update -y 4 | sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 5 | sudo yum install -y "@Development Tools" python2-pip openssl-devel python-devel gcc libffi-devel 6 | git clone -b release-3.9 https://github.com/openshift/openshift-ansible 7 | # git clone -b release-3.7 https://github.com/openshift/openshift-ansible 8 | sudo yum install -y ansible 9 | sudo reboot 10 | -------------------------------------------------------------------------------- /helper_scripts/sysprep-openshift.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | sudo yum update -y 4 | sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 5 | sudo yum install -y wget git net-tools bind-utils iptables-services bridge-utils bash-completion httpd-tools 6 | sudo yum-config-manager --enable rhui-REGION-rhel-server-extras 7 | sudo yum install -y docker NetworkManager 8 | 9 | sudo bash -c 'cat < /etc/sysconfig/docker-storage-setup 10 | DEVS=/dev/xvdf 11 | VG=docker-vg 12 | EOF' 13 | sudo docker-storage-setup 14 | 15 | sudo systemctl stop docker 16 | sudo systemctl enable docker 17 | sudo rm -rf /var/lib/docker/* 18 | sudo systemctl restart docker 19 | 20 | sudo reboot 21 | -------------------------------------------------------------------------------- /inventory.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "inventory" { 2 | template = "${file("${path.cwd}/helper_scripts/ansible-hosts.template.txt")}" 3 | vars { 4 | public_subdomain = "${aws_lb.infra_alb.dns_name}" 5 | admin_hostname = "${aws_lb.master_alb.dns_name}" 6 | master1_hostname = "${aws_instance.master1.private_dns}" 7 | master2_hostname = "${aws_instance.master2.private_dns}" 8 | master3_hostname = "${aws_instance.master3.private_dns}" 9 | infra1_hostname = "${aws_instance.infra1.private_dns}" 10 | infra2_hostname = "${aws_instance.infra2.private_dns}" 11 | infra3_hostname = "${aws_instance.infra3.private_dns}" 12 | worker1_hostname = "${aws_instance.worker1.private_dns}" 13 | worker2_hostname = "${aws_instance.worker2.private_dns}" 14 | worker3_hostname = "${aws_instance.worker3.private_dns}" 15 | } 16 | } 17 | resource "local_file" "inventory" { 18 | content = "${data.template_file.inventory.rendered}" 19 | filename = "${path.cwd}/inventory/ansible-hosts" 20 | } 21 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # Specify the provider and access details 2 | provider "aws" { 3 | region = "${var.aws_region}" 4 | } 5 | # Declare the data source 6 | data "aws_availability_zones" "available" {} 7 | data "template_file" "sysprep-bastion" { 8 | template = "${file("./helper_scripts/sysprep-bastion.sh")}" 9 | } 10 | data "template_file" "sysprep-openshift" { 11 | template = "${file("./helper_scripts/sysprep-openshift.sh")}" 12 | } 13 | -------------------------------------------------------------------------------- /output.tf: -------------------------------------------------------------------------------- 1 | output "openshift master" { 2 | value = "${aws_lb.master_alb.dns_name}" 3 | } 4 | output "openshift subdomain" { 5 | value = "${aws_lb.infra_alb.dns_name}" 6 | } 7 | output "bastion" { 8 | value = "${aws_instance.bastion.public_dns}" 9 | } 10 | -------------------------------------------------------------------------------- /security_group.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "sec_bastion" { 2 | name = "sec_bastion" 3 | description = "Used for bastion instance" 4 | vpc_id = "${aws_vpc.default.id}" 5 | ingress { 6 | from_port = 22 7 | to_port = 22 8 | protocol = "tcp" 9 | cidr_blocks = ["0.0.0.0/0"] 10 | } 11 | egress { 12 | from_port = 0 13 | to_port = 0 14 | protocol = "-1" 15 | cidr_blocks = ["0.0.0.0/0"] 16 | } 17 | lifecycle { 18 | create_before_destroy = true 19 | } 20 | } 21 | resource "aws_security_group" "sec_openshift" { 22 | name = "sec_openshift" 23 | description = "Used for openshift instances" 24 | vpc_id = "${aws_vpc.default.id}" 25 | ingress { 26 | from_port = 0 27 | to_port = 0 28 | protocol = "-1" 29 | cidr_blocks = ["0.0.0.0/0"] 30 | } 31 | egress { 32 | from_port = 0 33 | to_port = 0 34 | protocol = "-1" 35 | cidr_blocks = ["0.0.0.0/0"] 36 | } 37 | lifecycle { 38 | create_before_destroy = true 39 | } 40 | } 41 | resource "aws_security_group" "sec_master_alb" { 42 | name = "sec_master_alb" 43 | vpc_id = "${aws_vpc.default.id}" 44 | 45 | egress { 46 | from_port = 0 47 | to_port = 0 48 | protocol = "-1" 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | ingress { 52 | from_port = 8443 53 | to_port = 8443 54 | protocol = "tcp" 55 | cidr_blocks = ["0.0.0.0/0"] 56 | } 57 | } 58 | resource "aws_security_group" "sec_infra_alb" { 59 | name = "sec_infra_alb" 60 | vpc_id = "${aws_vpc.default.id}" 61 | 62 | egress { 63 | from_port = 0 64 | to_port = 0 65 | protocol = "-1" 66 | cidr_blocks = ["0.0.0.0/0"] 67 | } 68 | ingress { 69 | from_port = 80 70 | to_port = 80 71 | protocol = "tcp" 72 | cidr_blocks = ["0.0.0.0/0"] 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "bastion_key_path" { 2 | description = "My public ssh key" 3 | default = "~/.ssh/id_rsa.pub" 4 | } 5 | variable "openshift_key_path" { 6 | description = "My public ssh key" 7 | default = "./helper_scripts/id_rsa.pub" 8 | } 9 | variable "bastion_key_name" { 10 | description = "Desired name of AWS key pair" 11 | default = "bastion" 12 | } 13 | variable "openshift_key_name" { 14 | description = "Desired name of AWS key pair" 15 | default = "openshift" 16 | } 17 | variable "aws_region" { 18 | description = "AWS region to launch servers." 19 | default = "eu-west-1" 20 | } 21 | variable "aws_amis" { 22 | default = { 23 | eu-west-1 = "ami-6e28b517" 24 | } 25 | } 26 | variable "vpc_cidr" { 27 | default = "10.0.0.0/20" 28 | description = "the vpc cdir range" 29 | } 30 | variable "public_subnet_a" { 31 | default = "10.0.0.0/24" 32 | description = "Public subnet AZ A" 33 | } 34 | variable "public_subnet_b" { 35 | default = "10.0.4.0/24" 36 | description = "Public subnet AZ B" 37 | } 38 | variable "public_subnet_c" { 39 | default = "10.0.8.0/24" 40 | description = "Public subnet AZ C" 41 | } 42 | variable "private_subnet_a" { 43 | default = "10.0.1.0/24" 44 | description = "Private subnet AZ A" 45 | } 46 | variable "private_subnet_b" { 47 | default = "10.0.5.0/24" 48 | description = "Private subnet AZ B" 49 | } 50 | variable "private_subnet_c" { 51 | default = "10.0.9.0/24" 52 | description = "Private subnet AZ C" 53 | } 54 | -------------------------------------------------------------------------------- /vpc.tf: -------------------------------------------------------------------------------- 1 | # Create a VPC to launch our instances into 2 | resource "aws_vpc" "default" { 3 | cidr_block = "${var.vpc_cidr}" 4 | enable_dns_support = true 5 | enable_dns_hostnames = true 6 | tags { 7 | Name = "VPC" 8 | } 9 | } 10 | resource "aws_subnet" "PublicSubnetA" { 11 | vpc_id = "${aws_vpc.default.id}" 12 | cidr_block = "${var.public_subnet_a}" 13 | tags { 14 | Name = "Public Subnet A" 15 | } 16 | availability_zone = "${data.aws_availability_zones.available.names[0]}" 17 | } 18 | resource "aws_subnet" "PublicSubnetB" { 19 | vpc_id = "${aws_vpc.default.id}" 20 | cidr_block = "${var.public_subnet_b}" 21 | tags { 22 | Name = "Public Subnet B" 23 | } 24 | availability_zone = "${data.aws_availability_zones.available.names[1]}" 25 | } 26 | resource "aws_subnet" "PublicSubnetC" { 27 | vpc_id = "${aws_vpc.default.id}" 28 | cidr_block = "${var.public_subnet_c}" 29 | tags { 30 | Name = "Public Subnet C" 31 | } 32 | availability_zone = "${data.aws_availability_zones.available.names[2]}" 33 | } 34 | resource "aws_subnet" "PrivateSubnetA" { 35 | vpc_id = "${aws_vpc.default.id}" 36 | cidr_block = "${var.private_subnet_a}" 37 | tags { 38 | Name = "Private Subnet A" 39 | } 40 | availability_zone = "${data.aws_availability_zones.available.names[0]}" 41 | } 42 | resource "aws_subnet" "PrivateSubnetB" { 43 | vpc_id = "${aws_vpc.default.id}" 44 | cidr_block = "${var.private_subnet_b}" 45 | tags { 46 | Name = "Private Subnet B" 47 | } 48 | availability_zone = "${data.aws_availability_zones.available.names[1]}" 49 | } 50 | resource "aws_subnet" "PrivateSubnetC" { 51 | vpc_id = "${aws_vpc.default.id}" 52 | cidr_block = "${var.private_subnet_c}" 53 | tags { 54 | Name = "Private Subnet C" 55 | } 56 | availability_zone = "${data.aws_availability_zones.available.names[2]}" 57 | } 58 | resource "aws_route_table_association" "PublicSubnetA" { 59 | subnet_id = "${aws_subnet.PublicSubnetA.id}" 60 | route_table_id = "${aws_route_table.public_route_a.id}" 61 | } 62 | resource "aws_route_table_association" "PublicSubnetB" { 63 | subnet_id = "${aws_subnet.PublicSubnetB.id}" 64 | route_table_id = "${aws_route_table.public_route_b.id}" 65 | } 66 | resource "aws_route_table_association" "PublicSubnetC" { 67 | subnet_id = "${aws_subnet.PublicSubnetC.id}" 68 | route_table_id = "${aws_route_table.public_route_c.id}" 69 | } 70 | resource "aws_route_table_association" "PrivateSubnetA" { 71 | subnet_id = "${aws_subnet.PrivateSubnetA.id}" 72 | route_table_id = "${aws_route_table.private_route_a.id}" 73 | } 74 | resource "aws_route_table_association" "PrivateSubnetB" { 75 | subnet_id = "${aws_subnet.PrivateSubnetB.id}" 76 | route_table_id = "${aws_route_table.private_route_b.id}" 77 | } 78 | resource "aws_route_table_association" "PrivateSubnetC" { 79 | subnet_id = "${aws_subnet.PrivateSubnetC.id}" 80 | route_table_id = "${aws_route_table.private_route_c.id}" 81 | } 82 | resource "aws_internet_gateway" "gw" { 83 | vpc_id = "${aws_vpc.default.id}" 84 | tags { 85 | Name = "Internet Gateway" 86 | } 87 | } 88 | resource "aws_eip" "natgw_a" { 89 | vpc = true 90 | } 91 | resource "aws_eip" "natgw_b" { 92 | vpc = true 93 | } 94 | resource "aws_eip" "natgw_c" { 95 | vpc = true 96 | } 97 | resource "aws_nat_gateway" "public_nat_a" { 98 | allocation_id = "${aws_eip.natgw_a.id}" 99 | subnet_id = "${aws_subnet.PublicSubnetA.id}" 100 | depends_on = ["aws_internet_gateway.gw"] 101 | } 102 | resource "aws_nat_gateway" "public_nat_b" { 103 | allocation_id = "${aws_eip.natgw_b.id}" 104 | subnet_id = "${aws_subnet.PublicSubnetB.id}" 105 | depends_on = ["aws_internet_gateway.gw"] 106 | } 107 | resource "aws_nat_gateway" "public_nat_c" { 108 | allocation_id = "${aws_eip.natgw_c.id}" 109 | subnet_id = "${aws_subnet.PublicSubnetC.id}" 110 | depends_on = ["aws_internet_gateway.gw"] 111 | } 112 | resource "aws_network_acl" "all" { 113 | vpc_id = "${aws_vpc.default.id}" 114 | egress { 115 | protocol = "-1" 116 | rule_no = 2 117 | action = "allow" 118 | cidr_block = "0.0.0.0/0" 119 | from_port = 0 120 | to_port = 0 121 | } 122 | ingress { 123 | protocol = "-1" 124 | rule_no = 1 125 | action = "allow" 126 | cidr_block = "0.0.0.0/0" 127 | from_port = 0 128 | to_port = 0 129 | } 130 | tags { 131 | Name = "open acl" 132 | } 133 | } 134 | resource "aws_route_table" "public_route_a" { 135 | vpc_id = "${aws_vpc.default.id}" 136 | tags { 137 | Name = "Public Route A" 138 | } 139 | route { 140 | cidr_block = "0.0.0.0/0" 141 | gateway_id = "${aws_internet_gateway.gw.id}" 142 | } 143 | } 144 | resource "aws_route_table" "public_route_b" { 145 | vpc_id = "${aws_vpc.default.id}" 146 | tags { 147 | Name = "Public Route B" 148 | } 149 | route { 150 | cidr_block = "0.0.0.0/0" 151 | gateway_id = "${aws_internet_gateway.gw.id}" 152 | } 153 | } 154 | resource "aws_route_table" "public_route_c" { 155 | vpc_id = "${aws_vpc.default.id}" 156 | tags { 157 | Name = "Public Route C" 158 | } 159 | route { 160 | cidr_block = "0.0.0.0/0" 161 | gateway_id = "${aws_internet_gateway.gw.id}" 162 | } 163 | } 164 | resource "aws_route_table" "private_route_a" { 165 | vpc_id = "${aws_vpc.default.id}" 166 | tags { 167 | Name = "Private Route A" 168 | } 169 | route { 170 | cidr_block = "0.0.0.0/0" 171 | nat_gateway_id = "${aws_nat_gateway.public_nat_a.id}" 172 | } 173 | } 174 | resource "aws_route_table" "private_route_b" { 175 | vpc_id = "${aws_vpc.default.id}" 176 | tags { 177 | Name = "Private Route B" 178 | } 179 | route { 180 | cidr_block = "0.0.0.0/0" 181 | nat_gateway_id = "${aws_nat_gateway.public_nat_b.id}" 182 | } 183 | } 184 | resource "aws_route_table" "private_route_c" { 185 | vpc_id = "${aws_vpc.default.id}" 186 | tags { 187 | Name = "Private Route C" 188 | } 189 | route { 190 | cidr_block = "0.0.0.0/0" 191 | nat_gateway_id = "${aws_nat_gateway.public_nat_c.id}" 192 | } 193 | } 194 | --------------------------------------------------------------------------------