├── .gitignore
├── Makefile
├── README.md
├── app-servers.tf
├── bin
│   ├── ovpn-client-config.sh
│   ├── ovpn-init.sh
│   ├── ovpn-new-client.sh
│   └── ovpn-run.sh
├── key-pairs.tf
├── main.tf
├── nat-server.tf
├── outputs.tf
├── packer
│   ├── aws
│   │   └── app-server.json
│   └── scripts
│       ├── base.sh
│       ├── cleanup.sh
│       └── docker.sh
├── private-subnet.tf
├── public-subnet.tf
├── security-groups.tf
├── ssh_keys
│   └── rmorgan.pub
├── user_data
│   └── app-server.sh
├── variables.tf
└── vpc.tf

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.terraform
.terraform.tfstate.lock.info
.test-data
vendor
terraform.tfstate
terraform.tfstate.backup

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: all bake test init plan apply destroy help

all: help

bake: ## Bake a new AMI using Packer
	packer build packer/aws/app-server.json

test: ## Run the automated tests using Terratest
	cd test; go test -v -timeout 30m

init: ## Initialize Terraform to work locally
	terraform init

plan: ## Run the Terraform plan step
	terraform plan -var "ami=${AMI}"

apply: ## Run the Terraform apply step
	terraform apply -auto-approve -var "ami=${AMI}"

destroy: ## You probably don't know what you're doing
	terraform destroy -var "ami=${AMI}"

help:
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Terraform Rolling Deployment Demo

This is a demo project that accompanies a blog post I wrote. It shows how to
use various tools from the HashiCorp stack to achieve rolling deployments on
AWS with zero downtime.

The post is here: https://robmorgan.id.au/posts/rolling-deploys-on-aws-using-terraform/

It is based on a concept from Paul Hinze:
https://groups.google.com/forum/#!msg/terraform-tool/7Gdhv1OAc80/iNQ93riiLwAJ

## Architecture

![terraform aws architecture - current architecture](https://cloud.githubusercontent.com/assets/178939/13179538/9cbc9aec-d724-11e5-91e4-77b9a06ebdc6.png)

## Requirements

You should have the following tools installed:

* Git
* Packer (v1.2.3)
* Terraform (v0.11.7)

Additionally, you will need the following environment variables set:

* AWS_ACCESS_KEY_ID
* AWS_SECRET_ACCESS_KEY

## Usage

1. Copy your public SSH key to the `ssh_keys` directory and update the path in `key-pairs.tf`.

2. Before we create the AWS infrastructure, we must first _bake_ a new AMI using Packer.
   Using the supplied `Makefile`, simply run:

   ```
   $ make bake
   ```

   When Packer finishes running, it will output the AMI ID we need for the next step.

3. Now we can use Terraform to create the AWS resources:

   ```
   $ make plan AMI="ami-XXXYYYZZ"
   $ make apply AMI="ami-XXXYYYZZ"
   ```

4. Deploying a new release is simply a case of baking a fresh AMI and re-running
   Terraform (a scripted version of this loop is sketched just after this README):

   ```
   $ make bake
   $ make plan AMI="ami-XXXYYYZZ"
   $ make apply AMI="ami-XXXYYYZZ"
   ```

## Tests

You can run the included tests using `Terratest`:

```
$ make test
```
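A small wrapper along these lines removes the manual copy-paste of the AMI ID. This is a sketch only: `deploy.sh` is not part of this repo, and the `awk` parsing assumes Packer's machine-readable output format (CSV fields `timestamp,builder,artifact,index,id,region:ami-id`):

```
#!/usr/bin/env bash
# deploy.sh -- bake a fresh AMI and roll it out in one go (sketch, not in repo)
set -euo pipefail

# -machine-readable prints CSV lines; the artifact id field looks like
# "eu-west-1:ami-0123456789abcdef0", so split on the colon.
ami=$(packer build -machine-readable packer/aws/app-server.json \
  | awk -F, '$3 == "artifact" && $5 == "id" {print $6}' \
  | cut -d: -f2)

make plan AMI="${ami}"
make apply AMI="${ami}"
```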
--------------------------------------------------------------------------------
/app-servers.tf:
--------------------------------------------------------------------------------
resource "aws_elb" "elb_app" {
  name = "app-elb"

  listener {
    instance_port     = 80
    instance_protocol = "http"
    lb_port           = 80
    lb_protocol       = "http"
  }

  health_check {
    healthy_threshold   = 2
    unhealthy_threshold = 2
    timeout             = 5
    target              = "HTTP:80/"
    interval            = 15
  }

  cross_zone_load_balancing = true
  idle_timeout              = 60
  subnets                   = ["${aws_subnet.public_az1.id}", "${aws_subnet.public_az2.id}", "${aws_subnet.public_az3.id}"]
  security_groups           = ["${aws_security_group.elb_web.id}"]

  tags {
    Name = "app-elb"
  }
}

resource "aws_autoscaling_group" "asg_app" {
  lifecycle {
    create_before_destroy = true
  }

  # interpolate the LC into the ASG name so it always forces an update
  name                      = "asg-app - ${aws_launch_configuration.lc_app.name}"
  max_size                  = 5
  min_size                  = 2
  wait_for_elb_capacity     = 2
  desired_capacity          = 2
  health_check_grace_period = 150
  health_check_type         = "ELB"
  launch_configuration      = "${aws_launch_configuration.lc_app.id}"
  load_balancers            = ["${aws_elb.elb_app.id}"]

  # spread the app instances across the availability zones
  vpc_zone_identifier = ["${aws_subnet.private_az1.id}", "${aws_subnet.private_az2.id}", "${aws_subnet.private_az3.id}"]

  # this resource has no count, so an interpolated count.index would
  # always be 0; a plain name is clearer
  tag {
    key                 = "Name"
    value               = "app"
    propagate_at_launch = true
  }
}

resource "aws_launch_configuration" "lc_app" {
  lifecycle {
    create_before_destroy = true
  }

  image_id      = "${var.ami}"
  instance_type = "c4.large"

  # Our security group to allow HTTP and SSH access
  security_groups = ["${aws_security_group.default.id}", "${aws_security_group.app.id}"]

  user_data = "${file("user_data/app-server.sh")}"
}
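Because the launch configuration's generated name is interpolated into the ASG name, any change to the LC (a new AMI, for example) forces Terraform to build a replacement ASG; `create_before_destroy` plus `wait_for_elb_capacity` means the new group must be healthy behind the ELB before the old one is destroyed. A quick, illustrative way to convince yourself the rollover really is zero-downtime is to poll the ELB while `make apply` runs in another terminal:

```
#!/usr/bin/env bash
# Poll the ELB once a second; an unbroken stream of 200s during an
# apply indicates a seamless rollover.
url=$(terraform output url)
while sleep 1; do
  printf '%s ' "$(date +%T)"
  curl -s -o /dev/null -w '%{http_code}\n' --max-time 2 "${url}" || echo ERR
done
```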
--------------------------------------------------------------------------------
/bin/ovpn-client-config.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Fetch the generated .ovpn client configuration from the NAT/VPN host.
OVPN_DATA="ovpn-data"
ssh -t ubuntu@$(terraform output nat.ip) sudo docker run --volumes-from $OVPN_DATA --rm kylemanna/openvpn ovpn_getclient "${1}" > "${1}.ovpn"

--------------------------------------------------------------------------------
/bin/ovpn-init.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Generate the OpenVPN server configuration and initialize the PKI.
OVPN_DATA="ovpn-data"
ssh -t ubuntu@$(terraform output nat.ip) sudo docker run --volumes-from $OVPN_DATA --rm kylemanna/openvpn ovpn_genconfig -u udp://$(terraform output nat.ip)
ssh -t ubuntu@$(terraform output nat.ip) sudo docker run --volumes-from $OVPN_DATA --rm -it kylemanna/openvpn ovpn_initpki

--------------------------------------------------------------------------------
/bin/ovpn-new-client.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Issue a certificate for a new VPN client named by the first argument.
OVPN_DATA="ovpn-data"
ssh -t ubuntu@$(terraform output nat.ip) sudo docker run --volumes-from $OVPN_DATA --rm -it kylemanna/openvpn easyrsa build-client-full "${1}" nopass

--------------------------------------------------------------------------------
/bin/ovpn-run.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Start the OpenVPN server container on the NAT/VPN host.
OVPN_DATA="ovpn-data"
ssh -t ubuntu@$(terraform output nat.ip) sudo docker run --volumes-from $OVPN_DATA -d -p 1194:1194/udp --cap-add=NET_ADMIN kylemanna/openvpn
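Taken together, the helper scripts are meant to be run roughly in this order from the repo root once the infrastructure is up (the client name `alice` is just an example):

```
./bin/ovpn-init.sh                 # generate server config + PKI (interactive)
./bin/ovpn-run.sh                  # start the OpenVPN container on the NAT host
./bin/ovpn-new-client.sh alice     # issue a client certificate
./bin/ovpn-client-config.sh alice  # writes alice.ovpn locally for an OpenVPN client
```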
--------------------------------------------------------------------------------
/key-pairs.tf:
--------------------------------------------------------------------------------
resource "aws_key_pair" "deployer" {
  key_name   = "demo-rmorgan"
  public_key = "${file("ssh_keys/rmorgan.pub")}"
}

--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_version = ">= 0.11.7"
}

provider "aws" {
  region  = "${var.aws_region}"
  version = "~> 1.17"
}

--------------------------------------------------------------------------------
/nat-server.tf:
--------------------------------------------------------------------------------
/* NAT/VPN server */
resource "aws_eip" "nat" {
  instance = "${aws_instance.nat.id}"
  vpc      = true
}

resource "aws_instance" "nat" {
  ami           = "${lookup(var.amis, var.aws_region)}"
  instance_type = "t2.micro"

  # deploy the nat instance into the first availability zone
  subnet_id = "${aws_subnet.public_az1.id}"

  # Our security group to allow HTTP and SSH access
  vpc_security_group_ids = ["${aws_security_group.default.id}", "${aws_security_group.nat.id}"]

  key_name          = "${aws_key_pair.deployer.key_name}"
  source_dest_check = false

  tags = {
    Name = "nat"
  }

  connection {
    user = "ubuntu"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo iptables -t nat -A POSTROUTING -j MASQUERADE",
      "echo '1' | sudo tee /proc/sys/net/ipv4/ip_forward",

      /* Install docker */
      "curl -sSL https://get.docker.com/ | sudo sh",

      /* Initialize the OpenVPN data container */
      "sudo mkdir -p /etc/openvpn",

      "sudo docker run --name ovpn-data -v /etc/openvpn busybox",

      /* Generate the OpenVPN server config; self.public_ip avoids the
         self-reference cycle that aws_instance.nat.public_ip would
         create inside the resource's own provisioner */
      "sudo docker run --volumes-from ovpn-data --rm kylemanna/openvpn ovpn_genconfig -p ${var.vpc_cidr} -u udp://${self.public_ip}",
    ]
  }
}

--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
output "nat.ip" {
  value = "${aws_instance.nat.public_ip}"
}

output "asg_name" {
  value = "${aws_autoscaling_group.asg_app.id}"
}

output "elb_name" {
  value = "${aws_elb.elb_app.dns_name}"
}

output "url" {
  value = "http://${aws_elb.elb_app.dns_name}"
}

--------------------------------------------------------------------------------
/packer/aws/app-server.json:
--------------------------------------------------------------------------------
{
  "variables": {
    "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
    "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}",
    "ssh_username": "ubuntu"
  },
  "builders": [
    {
      "type": "amazon-ebs",
      "access_key": "{{user `aws_access_key`}}",
      "secret_key": "{{user `aws_secret_key`}}",
      "region": "eu-west-1",
      "source_ami": "ami-8d16ccfe",
      "instance_type": "c4.large",
      "ssh_username": "{{user `ssh_username`}}",
      "ami_name": "packer-base {{timestamp}}",
      "associate_public_ip_address": true
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'",
      "inline": [
        "mkdir -p /ops",
        "chmod a+w /ops"
      ]
    },
    {
      "type": "file",
      "source": "{{template_dir}}/../../ssh_keys",
      "destination": "/ops"
    },
    {
      "type": "shell",
      "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'",
      "inline": [
        "cat /ops/ssh_keys/* >> /home/ubuntu/.ssh/authorized_keys"
      ]
    },
    {
      "type": "shell",
      "scripts": [
        "{{template_dir}}/../scripts/base.sh"
      ],
      "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E '{{ .Path }}'"
    },
    {
      "type": "shell",
      "scripts": [
        "{{template_dir}}/../scripts/docker.sh"
      ],
      "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E '{{ .Path }}'"
    },
    {
      "type": "shell",
      "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'",
      "inline": [
        "docker pull nginx"
      ]
    },
    {
      "type": "shell",
      "scripts": [
        "{{template_dir}}/../scripts/cleanup.sh"
      ],
      "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E '{{ .Path }}'"
    }
  ]
}
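A bake with this template takes several minutes, so it can be worth a cheap sanity check first. `packer validate` is Packer's standard template check and catches JSON and variable errors before any AWS resources are touched:

```
# validate the template, then run the real bake (same as `make bake`)
packer validate packer/aws/app-server.json
packer build packer/aws/app-server.json
```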
--------------------------------------------------------------------------------
/packer/scripts/base.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -xe

# update packages
sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -y

# install base packages
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y vim curl

# hostname scripts
sudo bash -c 'cat << "EOF" > /opt/update_hostname.sh
#!/bin/bash
name="$(ec2metadata --instance-id 2>/dev/null)"
if [ "$name" != "" ]; then
  echo "writing hostname $name"
  echo -n $name > /etc/hostname
  echo "127.0.0.1 $name localhost" > /etc/hosts
else
  echo "ec2metadata not found"
fi

hostname -b -F /etc/hostname
EOF'

sudo bash -c 'cat << "EOF" > /etc/init/hostname.conf
description "set system hostname"

start on startup

pre-start script
  bash /opt/update_hostname.sh
end script

task
exec hostname -b -F /etc/hostname
EOF'

sudo chmod 0755 /opt/update_hostname.sh
sudo chmod 0644 /etc/init/hostname.conf
sudo /opt/update_hostname.sh
sudo hostname -b -F /etc/hostname

--------------------------------------------------------------------------------
/packer/scripts/cleanup.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -xe

echo "Cleaning up..."
sudo apt-get -y autoremove
sudo apt-get -y clean

sudo rm -rf /tmp/*
sudo rm -rf /ops

--------------------------------------------------------------------------------
/packer/scripts/docker.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -xe

# install Docker
curl -fsSL https://get.docker.com/ | sudo sh

# install Docker Compose
sudo sh -c 'curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose'
sudo chmod +x /usr/local/bin/docker-compose

--------------------------------------------------------------------------------
/private-subnet.tf:
--------------------------------------------------------------------------------
/* Private subnets */
resource "aws_subnet" "private_az1" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "${var.private_subnet_az1_cidr}"
  availability_zone       = "eu-west-1a"
  map_public_ip_on_launch = false
  depends_on              = ["aws_instance.nat"]

  tags {
    Name = "private az1"
  }
}

resource "aws_subnet" "private_az2" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "${var.private_subnet_az2_cidr}"
  availability_zone       = "eu-west-1b"
  map_public_ip_on_launch = false
  depends_on              = ["aws_instance.nat"]

  tags {
    Name = "private az2"
  }
}

resource "aws_subnet" "private_az3" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "${var.private_subnet_az3_cidr}"
  availability_zone       = "eu-west-1c"
  map_public_ip_on_launch = false
  depends_on              = ["aws_instance.nat"]

  tags {
    Name = "private az3"
  }
}

/* Routing table for the private subnets (all egress via the NAT instance) */
resource "aws_route_table" "private" {
  vpc_id = "${aws_vpc.default.id}"

  route {
    cidr_block  = "0.0.0.0/0"
    instance_id = "${aws_instance.nat.id}"
  }
}

/* Associate the routing table to the private subnets */
resource "aws_route_table_association" "private_az1" {
  subnet_id      = "${aws_subnet.private_az1.id}"
  route_table_id = "${aws_route_table.private.id}"
}

resource "aws_route_table_association" "private_az2" {
  subnet_id      = "${aws_subnet.private_az2.id}"
  route_table_id = "${aws_route_table.private.id}"
}

resource "aws_route_table_association" "private_az3" {
  subnet_id      = "${aws_subnet.private_az3.id}"
  route_table_id = "${aws_route_table.private.id}"
}
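The private route table sends all non-VPC traffic through the NAT instance, so the app servers get outbound internet access without public IPs. One way to spot-check this, assuming OpenSSH 7.3+ for the `-J` jump-host flag and substituting a real app-server private IP for the hypothetical placeholder:

```
# Outbound requests from a private instance should appear to come from
# the NAT instance's Elastic IP.
ssh -J "ubuntu@$(terraform output nat.ip)" ubuntu@<app-private-ip> \
  'curl -s https://checkip.amazonaws.com'
```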
--------------------------------------------------------------------------------
/public-subnet.tf:
--------------------------------------------------------------------------------
/* Internet gateway for the public subnets */
resource "aws_internet_gateway" "default" {
  vpc_id = "${aws_vpc.default.id}"
}

/* Public subnets */
resource "aws_subnet" "public_az1" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "${var.public_subnet_az1_cidr}"
  availability_zone       = "eu-west-1a"
  map_public_ip_on_launch = true
  depends_on              = ["aws_internet_gateway.default"]

  tags {
    Name = "public az1"
  }
}

resource "aws_subnet" "public_az2" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "${var.public_subnet_az2_cidr}"
  availability_zone       = "eu-west-1b"
  map_public_ip_on_launch = true
  depends_on              = ["aws_internet_gateway.default"]

  tags {
    Name = "public az2"
  }
}

resource "aws_subnet" "public_az3" {
  vpc_id                  = "${aws_vpc.default.id}"
  cidr_block              = "${var.public_subnet_az3_cidr}"
  availability_zone       = "eu-west-1c"
  map_public_ip_on_launch = true
  depends_on              = ["aws_internet_gateway.default"]

  tags {
    Name = "public az3"
  }
}

/* Routing table for the public subnets */
resource "aws_route_table" "public" {
  vpc_id = "${aws_vpc.default.id}"

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.default.id}"
  }
}

/* Associate the routing table to the public subnets */
resource "aws_route_table_association" "public_az1" {
  subnet_id      = "${aws_subnet.public_az1.id}"
  route_table_id = "${aws_route_table.public.id}"
}

resource "aws_route_table_association" "public_az2" {
  subnet_id      = "${aws_subnet.public_az2.id}"
  route_table_id = "${aws_route_table.public.id}"
}

resource "aws_route_table_association" "public_az3" {
  subnet_id      = "${aws_subnet.public_az3.id}"
  route_table_id = "${aws_route_table.public.id}"
}

--------------------------------------------------------------------------------
/security-groups.tf:
--------------------------------------------------------------------------------
/* Default security group */
resource "aws_security_group" "default" {
  name        = "sg_default"
  description = "Default security group that allows inbound and outbound traffic from all instances in the VPC"
  vpc_id      = "${aws_vpc.default.id}"

  # restrict inbound traffic to the VPC, as the description above says
  # (the original 0.0.0.0/0 opened all ports to the internet)
  ingress {
    from_port   = "0"
    to_port     = "0"
    protocol    = "-1"
    cidr_blocks = ["${var.vpc_cidr}"]
    self        = true
  }

  egress {
    from_port   = "0"
    to_port     = "0"
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
    self        = true
  }
}

/* Security group for the nat server */
resource "aws_security_group" "nat" {
  name        = "sg_nat"
  description = "Security group for nat instances that allows SSH and VPN traffic from the internet"
  vpc_id      = "${aws_vpc.default.id}"

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 1194
    to_port     = 1194
    protocol    = "udp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

/* Security group for the app servers */
resource "aws_security_group" "app" {
  name        = "sg_app"
  description = "Security group for app instances that allows web traffic inside the VPC"
  vpc_id      = "${aws_vpc.default.id}"

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = "0"
    to_port     = "0"
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

/* Security group for the web ELB */
resource "aws_security_group" "elb_web" {
  name        = "elb-web"
  description = "Security group for the web ELB that allows web traffic from the internet"
  vpc_id      = "${aws_vpc.default.id}"

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = "0"
    to_port     = "0"
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags {
    Name = "elb-web"
  }
}
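A quick reachability check of the two ports `sg_nat` opens to the internet. Note the UDP probe is only indicative: netcat reports UDP ports as open unless an ICMP rejection comes back, and OpenVPN won't answer an empty datagram:

```
nat_ip=$(terraform output nat.ip)
nc -zv  "${nat_ip}" 22    # SSH should connect
nc -zuv "${nat_ip}" 1194  # UDP probe; inconclusive but cheap
```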
--------------------------------------------------------------------------------
/ssh_keys/rmorgan.pub:
--------------------------------------------------------------------------------
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTXFLNSAhYUUg2PK3CIT6UzZOjCdSkdVCVySziZlsV6J4affqaRX8kChiICX7s7esQRCYHZtWWPbmto9P+TWEONoTIttfABTs5mlNGPQV+Pu6IIwhX10ChOvo/Oj6Ma+FAB5OGez7yZWd0BfyTTxPEjHcTdEoBUvrAKdGqzlzCh1xwDMa+pDH/vPwKlIQkcYISB7187DTpWl2q4ic58trzh7/0PO7XVunPV4NIzX4FxkOfMiZ7WImk0sffKg5wwbW18gMfaOa5xTbMgi5ZYvQ/kQ9Z3J1VyeKKXCslc1p3ftmHvIHOYBQZvAjQEK/RcThMjncyokL7Hi9b4fpnmP1WdAQzrC3PvjqUQF+Wu1HFGNcnGLv1dFsiDolG6PLwfnZeqieJaRLJoitIBBs0xBo6KtuS17tx6CYJn0lxc2+O6nW/00OW2yS7QGH5EcacoxA4dc3dWdJ6XrnPpACNtdE+ZJHJfklbkae1/342CXgX3IHJlna/wFyEQu3yv5ziIR6E39L+Ea23DiLLERkA9gTQrSUxXpIWM+lHXVMrNrlawp+qMI8tX7sTbesUFcVvqx1DbY1kh8mc1MmznzTm5kqGOZe343gui8l0U0YuthZSrYR+/9lk/N2B7OeM4VHSkxhvjhdEidCgKt7Wf0f//Cl4FJhCiKESSgqAaSa03SSyMw== robbym@gmail.com

--------------------------------------------------------------------------------
/user_data/app-server.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run the user-data payload with every output line timestamped and logged.
bash 2>&1 <<"USERDATA" | while read line; do echo "$(date --iso-8601=ns) $line"; done | tee -a /var/log/userdata.log
set -xe

# Hostname
/opt/update_hostname.sh
hostname -b -F /etc/hostname

# Run Nginx
sudo docker run -d -p 80:80 nginx
USERDATA

--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
variable "aws_region" {
  description = "AWS region"
  default     = "eu-west-1"
}

/* VPC settings */
variable "vpc_cidr" {
  description = "CIDR for VPC"
  default     = "172.31.0.0/16"
}

variable "public_subnet_az1_cidr" {
  description = "CIDR for az1 public subnet"
  default     = "172.31.0.0/24"
}

variable "public_subnet_az2_cidr" {
  description = "CIDR for az2 public subnet"
  default     = "172.31.1.0/24"
}

variable "public_subnet_az3_cidr" {
  description = "CIDR for az3 public subnet"
  default     = "172.31.2.0/24"
}

variable "private_subnet_az1_cidr" {
  description = "CIDR for az1 private subnet"
  default     = "172.31.3.0/24"
}

variable "private_subnet_az2_cidr" {
  description = "CIDR for az2 private subnet"
  default     = "172.31.4.0/24"
}

variable "private_subnet_az3_cidr" {
  description = "CIDR for az3 private subnet"
  default     = "172.31.5.0/24"
}

/* Ubuntu 14.04 AMIs by region */
variable "amis" {
  description = "Base AMI to launch the instances with"

  default = {
    us-west-1 = "ami-049d8641"
    us-east-1 = "ami-a6b8e7ce"
    eu-west-1 = "ami-47a23a30"
  }
}

/* Packer-baked AMI */
variable "ami" {
  description = "AMI generated by Packer"
}

--------------------------------------------------------------------------------
/vpc.tf:
--------------------------------------------------------------------------------
/* Define our VPC */
resource "aws_vpc" "default" {
  cidr_block           = "${var.vpc_cidr}"
  enable_dns_hostnames = true

  tags {
    Name = "terraform-vpc-demo"
  }
}
--------------------------------------------------------------------------------
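Finally, a one-line smoke test: the user-data script above starts the stock nginx image on port 80, so the ELB URL from the outputs should answer with a 200 shortly after the apply finishes:

```
curl -sI "$(terraform output url)" | head -n 1   # expect: HTTP/1.1 200 OK
```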