├── scripts ├── install-awscli.sh ├── update-package.sh ├── install-supervisor.sh ├── install-nginx.sh ├── run-redis-container.sh ├── install-php.sh ├── install-docker.sh ├── run-mysql-container.sh └── iptable-nat.sh ├── .editorconfig ├── .gitignore ├── provider.tf ├── variables.tf ├── terraform.tf ├── README.md ├── outputs.tf ├── vpc.tf └── ec2.tf /scripts/install-awscli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt install awscli -y -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | [*.tf] 2 | indent_size = 2 3 | 4 | [*.sh] 5 | indent_size = 4 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .terraform/ 3 | .terraform.lock.hcl 4 | terraform.tfvars -------------------------------------------------------------------------------- /scripts/update-package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | apt update 6 | apt upgrade -y -------------------------------------------------------------------------------- /scripts/install-supervisor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt install supervisor -y 4 | systemctl enable supervisor -------------------------------------------------------------------------------- /scripts/install-nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install nginx 4 | apt install -y nginx 5 | systemctl enable nginx 6 | -------------------------------------------------------------------------------- /scripts/run-redis-container.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker run --name redis -d \ 4 | -p 6379:6379 \ 5 | --restart unless-stopped \ 6 | redis:7 \ 7 | redis-server --requirepass "${redis_password}" -------------------------------------------------------------------------------- /provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "ap-northeast-1" 3 | 4 | # allowed_account_ids = [""] 5 | 6 | default_tags { 7 | tags = { 8 | Service = "lemp stack" 9 | Environment = "production" 10 | Owner = "John" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "ssh_public_key_filepath" { 2 | type = string 3 | default = "~/.ssh/id_ed25519.pub" 4 | } 5 | 6 | variable "mysql_root_password" { 7 | type = string 8 | } 9 | 10 | variable "mysql_database_name" { 11 | type = string 12 | } 13 | 14 | variable "mysql_user" { 15 | type = string 16 | } 17 | 18 | variable "mysql_password" { 19 | type = string 20 | } 21 | 22 | variable "redis_password" { 23 | type = string 24 | } 25 | -------------------------------------------------------------------------------- /terraform.tf: -------------------------------------------------------------------------------- 1 | # in multi-person collaboration, terraform's best practice recommends using state lock and remote state 2 | # there is no problem to delete this block, only means that we are using local state here 3 | terraform { 4 | # ref: 5 | backend "s3" { 6 | bucket = "terraform-state-files" 7 | key = "lemp-terraform.tfstate" 8 | region = "ap-northeast-1" 9 | dynamodb_table = "terraform-state-locking" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /scripts/install-php.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # php includes 4 | # openssl 5 | # pcre 6 | 7 | # php8.1-common include 8 | # ctype 9 | # fileinfo 10 | # pdo 11 | # tokenizer 12 | # xml 13 | 14 | # php8.1-json is a virtual package provided by php8.1-cli 15 | 16 | apt install -y software-properties-common 17 | add-apt-repository ppa:ondrej/php -y 18 | 19 | apt install -y \ 20 | php8.1 \ 21 | php8.1-cli \ 22 | php8.1-common \ 23 | php8.1-bcmath \ 24 | php8.1-curl \ 25 | php8.1-dom \ 26 | php8.1-mbstring \ 27 | php8.1-redis \ 28 | php8.1-swoole -------------------------------------------------------------------------------- /scripts/install-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt install -y \ 4 | ca-certificates \ 5 | curl \ 6 | gnupg \ 7 | lsb-release 8 | 9 | mkdir -p /etc/apt/keyrings 10 | 11 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 12 | 13 | echo \ 14 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 15 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 16 | 17 | apt update 18 | apt install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin -------------------------------------------------------------------------------- /scripts/run-mysql-container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | touch /root/create-database.sh 4 | 5 | echo "#\!/usr/bin/env bash 6 | 7 | mysql --user=root --password=\"\$MYSQL_ROOT_PASSWORD\" <<-EOSQL 8 | CREATE DATABASE IF NOT EXISTS ${mysql_database_name}; 9 | GRANT ALL PRIVILEGES ON ${mysql_database_name}.* TO '\$MYSQL_USER'@'%'; 10 | EOSQL" | tee -a /root/create-database.sh 11 | 12 | docker run --name mysql -d \ 13 | -p 3306:3306 \ 14 | -e 
MYSQL_ROOT_PASSWORD=${mysql_root_password} \ 15 | -e MYSQL_USER=${mysql_user} \ 16 | -e MYSQL_PASSWORD=${mysql_password} \ 17 | -v mysql:/var/lib/mysql \ 18 | -v /root/create-database.sh:/docker-entrypoint-initdb.d/create-database.sh \ 19 | --restart unless-stopped \ 20 | mysql/mysql-server:8.0 21 | 22 | # connect mysql 23 | # sudo docker exec -it mysql mysql -u root -p 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Use Terraform to Build LEMP Stack in AWS 2 | 3 | Using terraform to build LEMP stack, you will create these resources 4 | 5 | - VPC 6 | - public subnet 7 | - private subnet 8 | - app instance 9 | - database instance 10 | - redis instance 11 | - NAT instance 12 | 13 | > **Note** 14 | > 15 | > You must set AWS credentials with suitable permissions; you can use aws-cli to set this 16 | > 17 | 18 | After cloning the project, you need to change directory into the project folder 19 | 20 | ```bash 21 | cd lemp 22 | ``` 23 | 24 | Initialize the terraform project and install the provider 25 | 26 | ```bash 27 | terraform init 28 | ``` 29 | 30 | Use terraform to deploy the resource 31 | 32 | ```bash 33 | terraform apply 34 | ``` 35 | 36 | After reviewing the deployment plan, you can type `yes` to start the deployment 37 | 38 | ## Reference 39 | 40 | - [VPC with public and private subnets (NAT)](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html) -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | # print the public IP of the instance 2 | output "app_public_ip" { 3 | description = "Public IP address of the app instance" 4 | value = aws_eip.app.public_ip 5 | } 6 | 7 | output "app_private_ip" { 8 | description = "Private IP address of the app instance" 9 | value = aws_eip.app.private_ip 10 | } 11 | 12 | output 
"database_private_ip" { 13 | description = "Private IP address of the database instance" 14 | value = aws_instance.database.private_ip 15 | } 16 | 17 | output "redis_private_ip" { 18 | description = "Private IP address of the redis instance" 19 | value = aws_instance.redis.private_ip 20 | } 21 | 22 | output "nat_private_ip" { 23 | description = "Private IP address of the nat instance" 24 | value = aws_instance.nat.private_ip 25 | } 26 | 27 | # print the availability zone of the instance 28 | output "availability_zones" { 29 | description = "Availability zones of Subnet and EC2 instance" 30 | value = data.aws_availability_zones.available.names[0] 31 | } 32 | -------------------------------------------------------------------------------- /scripts/iptable-nat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # step 1, deploy sysctl config file 4 | # 5 | cat <<'EOF' >/etc/sysctl.d/30-ip_forward.conf 6 | net.ipv4.ip_forward=1 7 | net.ipv4.conf.eth0.send_redirects=0 8 | net.ipv4.ip_forward_use_pmtu=1 9 | EOF 10 | 11 | # reload config 12 | sysctl --load /etc/sysctl.d/30-ip_forward.conf 13 | sysctl -a | grep net.ipv4.ip_forward 14 | 15 | # step 2, deploy NAT service utility 16 | # 17 | interface_name=$(ip route show | grep 'default' | xargs | rev | cut -d ' ' -f 1 | rev) 18 | [[ ! 
-d /opt/nat/ ]] && mkdir -p /opt/nat/ 19 | 20 | cat <<EOF >/opt/nat/ip_nat.sh 21 | #!/bin/bash 22 | 23 | # enable ip nat postrouting via iptables command 24 | iptables -t nat -A POSTROUTING -o ${interface_name} -j MASQUERADE 25 | 26 | # display the NAT routing rule information 27 | iptables -t nat -L POSTROUTING 28 | EOF 29 | chmod +x /opt/nat/ip_nat.sh 30 | 31 | # step 3, configure snat service 32 | # 33 | cat <<EOF >/etc/systemd/system/snat.service 34 | [Unit] 35 | Description = SNAT via ENI ${interface_name} 36 | 37 | [Service] 38 | ExecStart = /opt/nat/ip_nat.sh 39 | Type = oneshot 40 | 41 | [Install] 42 | WantedBy = multi-user.target 43 | EOF 44 | 45 | # step 4, install and enable the snat service 46 | # 47 | systemctl enable snat 48 | systemctl start snat 49 | -------------------------------------------------------------------------------- /vpc.tf: -------------------------------------------------------------------------------- 1 | # instance type has a corresponding availability zone in each region 2 | # here the availability zones are filtered by the resource aws_availability_zones 3 | # ref: https://aws.amazon.com/tw/premiumsupport/knowledge-center/ec2-instance-type-not-supported-az-error/ 4 | data "aws_availability_zones" "available" { 5 | state = "available" 6 | } 7 | 8 | # set virtual private cloud (vpc) 9 | resource "aws_vpc" "main" { 10 | cidr_block = "10.0.0.0/16" 11 | enable_dns_hostnames = true 12 | enable_dns_support = true 13 | 14 | tags = { 15 | Name = "lemp stack" 16 | } 17 | } 18 | 19 | # set up internet gateways to allow communication between the instance in the vpc and the external internet 20 | resource "aws_internet_gateway" "igw" { 21 | vpc_id = aws_vpc.main.id 22 | 23 | tags = { 24 | Name = "internet gateway" 25 | } 26 | } 27 | 28 | # set up the public subnet to put the app instance 29 | resource "aws_subnet" "public" { 30 | vpc_id = aws_vpc.main.id 31 | # subnet availability zone needs to be the same as instance's availability zone 32 | 
availability_zone = data.aws_availability_zones.available.names[0] 33 | cidr_block = "10.0.0.0/24" 34 | 35 | tags = { 36 | Name = "public subnet" 37 | } 38 | } 39 | 40 | # set up the private subnet to put the database instance 41 | resource "aws_subnet" "private" { 42 | vpc_id = aws_vpc.main.id 43 | # subnet availability zone needs to be the same as instance's availability zone 44 | availability_zone = data.aws_availability_zones.available.names[0] 45 | cidr_block = "10.0.1.0/24" 46 | 47 | tags = { 48 | Name = "private subnet" 49 | } 50 | } 51 | 52 | # set up the route table, enables network packets to flow in and out efficiently 53 | resource "aws_route_table" "public" { 54 | vpc_id = aws_vpc.main.id 55 | 56 | route { 57 | cidr_block = "0.0.0.0/0" 58 | gateway_id = aws_internet_gateway.igw.id 59 | } 60 | 61 | route { 62 | ipv6_cidr_block = "::/0" 63 | gateway_id = aws_internet_gateway.igw.id 64 | } 65 | 66 | tags = { 67 | Name = "public" 68 | } 69 | } 70 | 71 | resource "aws_route_table_association" "public_route_table_association" { 72 | subnet_id = aws_subnet.public.id 73 | route_table_id = aws_route_table.public.id 74 | } 75 | 76 | resource "aws_route_table" "private" { 77 | vpc_id = aws_vpc.main.id 78 | 79 | route { 80 | cidr_block = "0.0.0.0/0" 81 | network_interface_id = aws_instance.nat.primary_network_interface_id 82 | } 83 | 84 | tags = { 85 | Name = "private" 86 | } 87 | } 88 | 89 | resource "aws_route_table_association" "private_route_table_association" { 90 | subnet_id = aws_subnet.private.id 91 | route_table_id = aws_route_table.private.id 92 | } 93 | 94 | # set public IP of app 95 | resource "aws_eip" "app" { 96 | vpc = true 97 | instance = aws_instance.app.id 98 | } 99 | -------------------------------------------------------------------------------- /ec2.tf: -------------------------------------------------------------------------------- 1 | # filter option can refer to aws cli "describe-images" 2 | # ref: 
https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html 3 | data "aws_ami" "ubuntu_22_04_arm" { 4 | most_recent = true 5 | 6 | filter { 7 | name = "architecture" 8 | values = ["arm64"] 9 | } 10 | 11 | filter { 12 | name = "name" 13 | values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-arm64-server-*"] 14 | } 15 | 16 | filter { 17 | name = "virtualization-type" 18 | values = ["hvm"] 19 | } 20 | 21 | owners = ["099720109477"] # Canonical 22 | } 23 | 24 | data "aws_ami" "amazon_linux_arm" { 25 | most_recent = true 26 | 27 | filter { 28 | name = "name" 29 | values = ["amzn2-ami-kernel-5.10-hvm-2.0.*"] 30 | } 31 | 32 | filter { 33 | name = "virtualization-type" 34 | values = ["hvm"] 35 | } 36 | 37 | filter { 38 | name = "root-device-type" 39 | values = ["ebs"] 40 | } 41 | 42 | filter { 43 | name = "architecture" 44 | values = ["arm64"] 45 | } 46 | 47 | filter { 48 | name = "image-type" 49 | values = ["machine"] 50 | } 51 | 52 | owners = ["amazon"] 53 | } 54 | 55 | resource "aws_instance" "app" { 56 | ami = data.aws_ami.ubuntu_22_04_arm.id 57 | instance_type = "t4g.micro" 58 | key_name = aws_key_pair.ssh.key_name 59 | 60 | # vpc setting, place the instance in the specified subnet 61 | availability_zone = data.aws_availability_zones.available.names[0] 62 | subnet_id = aws_subnet.public.id 63 | security_groups = [aws_security_group.app.id] 64 | 65 | # when instance launched, execute the configuration tasks 66 | user_data_base64 = data.cloudinit_config.app_setup.rendered 67 | user_data_replace_on_change = true 68 | 69 | # Ubuntu community recommended minimum 15 GB of hard drive space 70 | # https://help.ubuntu.com/community/DiskSpace 71 | root_block_device { 72 | volume_type = "gp3" 73 | volume_size = 15 74 | delete_on_termination = true 75 | } 76 | 77 | credit_specification { 78 | cpu_credits = "standard" 79 | } 80 | 81 | tags = { 82 | Name = "app" 83 | } 84 | } 85 | 86 | data "cloudinit_config" "app_setup" { 87 | gzip = true 88 | base64_encode = true 89 
| 90 | part { 91 | filename = "01-update-package.sh" 92 | content_type = "text/x-shellscript" 93 | content = file("scripts/update-package.sh") 94 | } 95 | 96 | part { 97 | filename = "02-install-nginx.sh" 98 | content_type = "text/x-shellscript" 99 | content = file("scripts/install-nginx.sh") 100 | } 101 | 102 | part { 103 | filename = "03-install-php.sh" 104 | content_type = "text/x-shellscript" 105 | content = file("scripts/install-php.sh") 106 | } 107 | 108 | part { 109 | filename = "04-install-supervisor.sh" 110 | content_type = "text/x-shellscript" 111 | content = file("scripts/install-supervisor.sh") 112 | } 113 | 114 | part { 115 | filename = "05-install-awscli.sh" 116 | content_type = "text/x-shellscript" 117 | content = file("scripts/install-awscli.sh") 118 | } 119 | 120 | part { 121 | filename = "06-install-docker.sh" 122 | content_type = "text/x-shellscript" 123 | content = file("scripts/install-docker.sh") 124 | } 125 | } 126 | 127 | # set security groups, similar to firewall 128 | resource "aws_security_group" "app" { 129 | name = "app_security_group" 130 | vpc_id = aws_vpc.main.id 131 | 132 | ingress { 133 | description = "ssh" 134 | from_port = 22 135 | to_port = 22 136 | protocol = "tcp" 137 | cidr_blocks = ["0.0.0.0/0"] 138 | } 139 | 140 | ingress { 141 | description = "https" 142 | from_port = 443 143 | to_port = 443 144 | protocol = "tcp" 145 | cidr_blocks = ["0.0.0.0/0"] 146 | } 147 | 148 | egress { 149 | from_port = 0 150 | to_port = 0 151 | protocol = "-1" 152 | cidr_blocks = ["0.0.0.0/0"] 153 | } 154 | 155 | tags = { 156 | Name = "app" 157 | } 158 | } 159 | 160 | resource "aws_instance" "database" { 161 | ami = data.aws_ami.ubuntu_22_04_arm.id 162 | instance_type = "t4g.micro" 163 | key_name = aws_key_pair.ssh.key_name 164 | availability_zone = data.aws_availability_zones.available.names[0] 165 | user_data_base64 = data.cloudinit_config.database_setup.rendered 166 | user_data_replace_on_change = true 167 | 168 | network_interface { 169 | 
network_interface_id = aws_network_interface.database.id 170 | device_index = 0 171 | } 172 | 173 | root_block_device { 174 | volume_type = "gp3" 175 | volume_size = 15 176 | delete_on_termination = true 177 | encrypted = true 178 | } 179 | 180 | credit_specification { 181 | cpu_credits = "standard" 182 | } 183 | 184 | tags = { 185 | Name = "database" 186 | } 187 | } 188 | 189 | data "cloudinit_config" "database_setup" { 190 | gzip = true 191 | base64_encode = true 192 | 193 | part { 194 | filename = "01-update-package.sh" 195 | content_type = "text/x-shellscript" 196 | content = file("scripts/update-package.sh") 197 | } 198 | 199 | part { 200 | filename = "02-install-docker.sh" 201 | content_type = "text/x-shellscript" 202 | content = file("scripts/install-docker.sh") 203 | } 204 | 205 | part { 206 | filename = "03-run-mysql-container.sh" 207 | content_type = "text/x-shellscript" 208 | 209 | content = templatefile("scripts/run-mysql-container.sh", { 210 | mysql_database_name = var.mysql_database_name 211 | mysql_root_password = var.mysql_root_password 212 | mysql_user = var.mysql_user 213 | mysql_password = var.mysql_password 214 | }) 215 | } 216 | } 217 | 218 | resource "aws_network_interface" "database" { 219 | subnet_id = aws_subnet.private.id 220 | security_groups = [aws_security_group.database.id] 221 | private_ips = ["10.0.1.100"] 222 | 223 | tags = { 224 | Name = "database_network_interface" 225 | } 226 | } 227 | 228 | resource "aws_security_group" "database" { 229 | name = "database_security_group" 230 | vpc_id = aws_vpc.main.id 231 | 232 | ingress { 233 | description = "ssh" 234 | from_port = 22 235 | to_port = 22 236 | protocol = "tcp" 237 | cidr_blocks = ["0.0.0.0/0"] 238 | } 239 | 240 | ingress { 241 | description = "mysql" 242 | from_port = 3306 243 | to_port = 3306 244 | protocol = "tcp" 245 | cidr_blocks = ["0.0.0.0/0"] 246 | } 247 | 248 | egress { 249 | from_port = 0 250 | to_port = 0 251 | protocol = "-1" 252 | cidr_blocks = ["0.0.0.0/0"] 253 | } 
254 | 255 | tags = { 256 | Name = "database" 257 | } 258 | } 259 | 260 | resource "aws_instance" "redis" { 261 | ami = data.aws_ami.ubuntu_22_04_arm.id 262 | instance_type = "t4g.micro" 263 | key_name = aws_key_pair.ssh.key_name 264 | availability_zone = data.aws_availability_zones.available.names[0] 265 | user_data_base64 = data.cloudinit_config.redis_setup.rendered 266 | user_data_replace_on_change = true 267 | 268 | network_interface { 269 | network_interface_id = aws_network_interface.redis.id 270 | device_index = 0 271 | } 272 | 273 | root_block_device { 274 | volume_type = "gp3" 275 | volume_size = 15 276 | delete_on_termination = true 277 | encrypted = true 278 | } 279 | 280 | credit_specification { 281 | cpu_credits = "standard" 282 | } 283 | 284 | tags = { 285 | Name = "redis" 286 | } 287 | } 288 | 289 | resource "aws_network_interface" "redis" { 290 | subnet_id = aws_subnet.private.id 291 | security_groups = [aws_security_group.redis.id] 292 | private_ips = ["10.0.1.101"] 293 | 294 | tags = { 295 | Name = "redis_network_interface" 296 | } 297 | } 298 | 299 | data "cloudinit_config" "redis_setup" { 300 | gzip = true 301 | base64_encode = true 302 | 303 | part { 304 | filename = "01-update-package.sh" 305 | content_type = "text/x-shellscript" 306 | content = file("scripts/update-package.sh") 307 | } 308 | 309 | part { 310 | filename = "02-install-docker.sh" 311 | content_type = "text/x-shellscript" 312 | content = file("scripts/install-docker.sh") 313 | } 314 | 315 | part { 316 | filename = "03-run-redis-container.sh" 317 | content_type = "text/x-shellscript" 318 | 319 | content = templatefile("scripts/run-redis-container.sh", { 320 | redis_password = var.redis_password 321 | }) 322 | } 323 | } 324 | 325 | resource "aws_security_group" "redis" { 326 | name = "redis_security_group" 327 | vpc_id = aws_vpc.main.id 328 | 329 | ingress { 330 | description = "ssh" 331 | from_port = 22 332 | to_port = 22 333 | protocol = "tcp" 334 | cidr_blocks = ["0.0.0.0/0"] 335 
| } 336 | 337 | ingress { 338 | description = "redis" 339 | from_port = 6379 340 | to_port = 6379 341 | protocol = "tcp" 342 | cidr_blocks = ["0.0.0.0/0"] 343 | } 344 | 345 | egress { 346 | from_port = 0 347 | to_port = 0 348 | protocol = "-1" 349 | cidr_blocks = ["0.0.0.0/0"] 350 | } 351 | } 352 | 353 | resource "aws_instance" "nat" { 354 | ami = data.aws_ami.amazon_linux_arm.id 355 | instance_type = "t4g.nano" 356 | key_name = aws_key_pair.ssh.key_name 357 | availability_zone = data.aws_availability_zones.available.names[0] 358 | subnet_id = aws_subnet.public.id 359 | associate_public_ip_address = true 360 | security_groups = [aws_security_group.nat.id] 361 | source_dest_check = false 362 | user_data_base64 = data.cloudinit_config.nat_setup.rendered 363 | user_data_replace_on_change = true 364 | 365 | root_block_device { 366 | volume_type = "gp3" 367 | delete_on_termination = true 368 | encrypted = true 369 | } 370 | 371 | credit_specification { 372 | cpu_credits = "standard" 373 | } 374 | 375 | tags = { 376 | Name = "nat" 377 | } 378 | } 379 | 380 | data "cloudinit_config" "nat_setup" { 381 | gzip = true 382 | base64_encode = true 383 | 384 | part { 385 | filename = "01-update-package.sh" 386 | content_type = "text/x-shellscript" 387 | content = file("scripts/update-package.sh") 388 | } 389 | 390 | part { 391 | filename = "02-iptable-nat.sh" 392 | content_type = "text/x-shellscript" 393 | content = file("scripts/iptable-nat.sh") 394 | } 395 | } 396 | 397 | resource "aws_security_group" "nat" { 398 | name = "nat_security_group" 399 | vpc_id = aws_vpc.main.id 400 | 401 | ingress { 402 | description = "Allow NAT for all my promary vpc cidr" 403 | from_port = 0 404 | to_port = 0 405 | protocol = "-1" 406 | cidr_blocks = ["0.0.0.0/0"] 407 | } 408 | 409 | egress { 410 | description = "Allow NAT out-going anywhere" 411 | from_port = 0 412 | to_port = 0 413 | protocol = "-1" 414 | cidr_blocks = ["0.0.0.0/0"] 415 | ipv6_cidr_blocks = ["::/0"] 416 | } 417 | 418 | tags = { 
419 | Name = "nat" 420 | } 421 | } 422 | 423 | # set ssh key pair to connect the instance 424 | resource "aws_key_pair" "ssh" { 425 | key_name = "lemp_ssh_key" 426 | public_key = file(var.ssh_public_key_filepath) 427 | } 428 | --------------------------------------------------------------------------------