├── .gitmodules
├── LICENSE
├── README.md
├── autoscalling-elb
│   └── main.tf
├── bastion-highavailability
│   ├── README.md
│   ├── dns.tf
│   ├── outputs.tf
│   ├── provider.tf
│   ├── resources.tf
│   ├── variables.tf
│   ├── variables.tfvars
│   └── vpc.tf
├── ci-pipeline
│   ├── README.md
│   ├── main.tf
│   └── setup.sh
├── docker-registry
│   ├── main.tf
│   ├── outputs.tf
│   ├── setup.sh
│   ├── variables.tf
│   └── variables.tfvars
├── docker-swarm-cluster
│   ├── README.md
│   ├── hosts
│   ├── install-docker.sh
│   ├── outputs.tf
│   ├── playbook.yml
│   ├── provider.tf
│   ├── resources.tf
│   ├── security_groups.tf
│   └── variables.tf
├── ec2-elb
│   ├── bootstrap-server1.sh
│   ├── bootstrap-server2.sh
│   ├── main.tf
│   └── variables.tf
├── etcd-cluster
│   ├── README.md
│   ├── cloud-config.yml
│   ├── provider.tf
│   ├── resources.tf
│   ├── security_groups.tf
│   └── variables.tf
├── linuxkit-aws
│   ├── aws.yml
│   ├── files
│   │   ├── assume-role-policy.json
│   │   └── policy.tpl
│   └── main.tf
├── single-ec2-instance
│   ├── README.md
│   └── main.tf
├── telegraf-influxdb-grafana
│   ├── bootstrap.sh
│   ├── main.tf
│   └── variables.tf
├── tick-stack-ansible
│   ├── README.md
│   ├── ansible
│   │   ├── group_vars
│   │   │   └── all
│   │   ├── inventory
│   │   └── playbook.yml
│   ├── logo.png
│   └── terraform
│       ├── main.tf
│       ├── outputs.tf
│       ├── variables.tf
│       └── variables.tfvars
├── vpc-public-private-subnet
│   ├── README.md
│   ├── install.sh
│   ├── provider.tf
│   ├── resources.tf
│   ├── variables.tf
│   └── vpc.tf
└── wordpress
    ├── .gitignore
    ├── bootstrap.sh
    ├── main.tf
    ├── outputs.tf
    └── variables.tf
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "telegraf-influxdb-grafana/telegraf-influxdb-grafana"]
2 | path = telegraf-influxdb-grafana/telegraf-influxdb-grafana
3 | url = https://github.com/mlabouardy/telegraf-influxdb-grafana.git
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 LABOUARDY Mohamed
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Terraform AWS Use cases
2 | Terraform template for AWS provider
3 |
4 | # How to use
5 |
6 | - Set the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables:
7 |
8 | ```
9 | $ export AWS_ACCESS_KEY_ID="YOUR ACCESS KEY ID"
10 | $ export AWS_SECRET_ACCESS_KEY="YOUR SECRET ACCESS KEY"
11 | ```
12 |
13 | # Tutorials
14 |
15 | * Setting up an etcd cluster on AWS using CoreOS & Terraform
16 | * Setup Docker Swarm on AWS using Ansible & Terraform
17 | * Manage AWS VPC as Infrastructure as Code with Terraform
18 | * Manage AWS Infrastructure as Code with Terraform
19 | * Highly Available Bastion Hosts with Route53
20 | * Highly Available Docker Registry on AWS with Nexus
21 |
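22 | # Deploy a lab
23 | 
24 | Each folder is a standalone Terraform configuration. A minimal workflow, assuming the defaults in that folder's variables.tf (folders that ship a variables.tfvars expect it to be filled in and passed with `-var-file=variables.tfvars`):
25 | 
26 | ```
27 | $ cd <lab-folder>
28 | $ terraform init
29 | $ terraform plan
30 | $ terraform apply
31 | ```
32 | 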
--------------------------------------------------------------------------------
/autoscalling-elb/main.tf:
--------------------------------------------------------------------------------
1 | # Define launch configuration
2 |
3 | resource "aws_launch_configuration" "previously_webcluster" {
4 | name = "previously_webcluster"
5 | image_id = "${var.ami-webserver}"
6 | instance_type = "${var.instance_type}"
7 |
8 | lifecycle {
9 | create_before_destroy = true
10 | }
11 | }
12 |
13 | resource "aws_autoscaling_group" "previously_asg" {
14 | name = "previously_asg"
15 | launch_configuration = "${aws_launch_configuration.previously_webcluster.name}"
16 | vpc_zone_identifier = ["${aws_subnet.previously_private_us_west_2a.id}", "${aws_subnet.previously_private_us_west_2b.id}"] # required: an ASG needs subnets or availability zones
17 | min_size = 2
18 | max_size = 3
19 | lifecycle {
20 | create_before_destroy = true
21 | }
22 | }
23 |
24 | resource "aws_elb" "previously_elb" {
25 | # ELB names allow only alphanumerics and hyphens; the subnets below already pin the AZs
26 | name = "previously-elb"
27 | subnets = ["${aws_subnet.previously_private_us_west_2a.id}", "${aws_subnet.previously_private_us_west_2b.id}"]
28 | security_groups = ["${aws_security_group.previously_elb_sg.id}"]
29 |
30 | listener {
31 | instance_port = 80
32 | instance_protocol = "http"
33 | lb_port = 80
34 | lb_protocol = "http"
35 | }
36 |
37 | health_check {
38 | healthy_threshold = 2
39 | unhealthy_threshold = 2
40 | timeout = 3
41 | target = "HTTP:80/"
42 | interval = 30
43 | }
44 |
45 | cross_zone_load_balancing = true
46 | idle_timeout = 400
47 | connection_draining = true
48 | connection_draining_timeout = 400
49 |
50 | tags {
51 | Name = "previously_elb"
52 | }
53 | }
54 |
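55 | # The subnets, security group, and variables referenced above are not defined
56 | # in this folder. A minimal sketch of the missing pieces follows; the resource
57 | # names match the references above, while the region, CIDRs, and the port-80
58 | # ingress rule are assumptions.
59 | 
60 | provider "aws" {
61 |   region = "us-west-2"
62 | }
63 | 
64 | variable "ami-webserver" {}
65 | 
66 | variable "instance_type" {
67 |   default = "t2.micro"
68 | }
69 | 
70 | resource "aws_vpc" "previously_vpc" {
71 |   cidr_block = "10.0.0.0/16"
72 | }
73 | 
74 | resource "aws_subnet" "previously_private_us_west_2a" {
75 |   vpc_id            = "${aws_vpc.previously_vpc.id}"
76 |   cidr_block        = "10.0.1.0/24"
77 |   availability_zone = "us-west-2a"
78 | }
79 | 
80 | resource "aws_subnet" "previously_private_us_west_2b" {
81 |   vpc_id            = "${aws_vpc.previously_vpc.id}"
82 |   cidr_block        = "10.0.2.0/24"
83 |   availability_zone = "us-west-2b"
84 | }
85 | 
86 | resource "aws_security_group" "previously_elb_sg" {
87 |   name   = "previously_elb_sg"
88 |   vpc_id = "${aws_vpc.previously_vpc.id}"
89 | 
90 |   ingress {
91 |     from_port   = 80
92 |     to_port     = 80
93 |     protocol    = "tcp"
94 |     cidr_blocks = ["0.0.0.0/0"]
95 |   }
96 | 
97 |   egress {
98 |     from_port   = 0
99 |     to_port     = 0
100 |     protocol    = "-1"
101 |     cidr_blocks = ["0.0.0.0/0"]
102 |   }
103 | }
104 | 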
--------------------------------------------------------------------------------
/bastion-highavailability/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
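5 | # Highly Available Bastion Hosts
6 | 
7 | Two bastion instances in the us-east-1a and us-east-1b public subnets sit behind a single round-robin Route 53 A record (dns.tf); a third instance lives in a private subnet and is only reachable through the bastions.
8 | 
9 | Usage (a sketch; fill in key_name and zone_id in variables.tfvars first):
10 | 
11 | ```
12 | $ terraform apply -var-file=variables.tfvars
13 | ```
14 | 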
--------------------------------------------------------------------------------
/bastion-highavailability/dns.tf:
--------------------------------------------------------------------------------
1 | resource "aws_route53_record" "bastion-slowcoder" {
2 | zone_id = "${var.zone_id}"
3 | name = "bastion.slowcoder.com"
4 | type = "A"
5 | ttl = "330"
6 | records = ["${aws_eip.bastion-1a-eip.public_ip}", "${aws_eip.bastion-1b-eip.public_ip}"]
7 | }
8 |
--------------------------------------------------------------------------------
/bastion-highavailability/outputs.tf:
--------------------------------------------------------------------------------
1 | output "BASTION-1" {
2 | value = "${aws_eip.bastion-1a-eip.public_ip}"
3 | }
4 |
5 | output "BASTION-2" {
6 | value = "${aws_eip.bastion-1b-eip.public_ip}"
7 | }
8 |
9 | output "PRIVATE-EC2" {
10 | value = "${aws_instance.private-ec2.private_ip}"
11 | }
12 |
--------------------------------------------------------------------------------
/bastion-highavailability/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | }
4 |
--------------------------------------------------------------------------------
/bastion-highavailability/resources.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "bastion-1a" {
2 | ami = "${lookup(var.amis, var.region)}"
3 | instance_type = "${var.instance_type}"
4 | key_name = "${var.key_name}"
5 | subnet_id = "${aws_subnet.us-east-1a-public.id}"
6 | associate_public_ip_address = true
7 |
8 | tags {
9 | Name = "bastion-1a"
10 | }
11 | }
12 |
13 | resource "aws_eip" "bastion-1a-eip" {
14 | instance = "${aws_instance.bastion-1a.id}"
15 | vpc = true
16 | }
17 |
18 | resource "aws_instance" "bastion-1b" {
19 | ami = "${lookup(var.amis, var.region)}"
20 | instance_type = "${var.instance_type}"
21 | key_name = "${var.key_name}"
22 | subnet_id = "${aws_subnet.us-east-1b-public.id}"
23 | associate_public_ip_address = true
24 |
25 | tags {
26 | Name = "bastion-1b"
27 | }
28 | }
29 |
30 | resource "aws_eip" "bastion-1b-eip" {
31 | instance = "${aws_instance.bastion-1b.id}"
32 | vpc = true
33 | }
34 |
35 | resource "aws_instance" "private-ec2" {
36 | ami = "${lookup(var.amis, var.region)}"
37 | instance_type = "${var.instance_type}"
38 | key_name = "${var.key_name}"
39 | subnet_id = "${aws_subnet.us-east-1a-private.id}"
40 |
41 | tags {
42 | Name = "private-ec2"
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/bastion-highavailability/variables.tf:
--------------------------------------------------------------------------------
1 | variable "key_name" {}
2 | variable "zone_id" {}
3 |
4 | variable "region" {
5 | description = "AWS Region"
6 | default = "us-east-1"
7 | }
8 |
9 | variable "vpc_cidr" {
10 | description = "VPC CIDR Block"
11 | default = "10.0.0.0/16"
12 | }
13 |
14 | variable "us_east_1a_public_cidr" {
15 | description = "CIDR for the public subnet"
16 | default = "10.0.1.0/24"
17 | }
18 |
19 | variable "us_east_1b_public_cidr" {
20 | description = "CIDR for the public subnet"
21 | default = "10.0.2.0/24"
22 | }
23 |
24 | variable "us_east_1a_private_cidr" {
25 | description = "CIDR for the private subnet"
26 | default = "10.0.3.0/24"
27 | }
28 |
29 | variable "availability_zones" {
30 | type = "map"
31 | description = "Availability Zones by CIDR"
32 | default = {
33 | "10.0.1.0/24" = "us-east-1a"
34 | "10.0.2.0/24" = "us-east-1b"
35 | "10.0.3.0/24" = "us-east-1a"
36 | }
37 | }
38 |
39 | variable "amis" {
40 | type = "map"
41 | description = "AMIs by region"
42 | default = {
43 | us-east-1 = "ami-4fffc834"
44 | }
45 | }
46 |
47 | variable "instance_type" {
48 | description = "EC2 instance type"
49 | default = "t2.micro"
50 | }
51 |
--------------------------------------------------------------------------------
/bastion-highavailability/variables.tfvars:
--------------------------------------------------------------------------------
1 | key_name=""
2 | zone_id=""
3 |
--------------------------------------------------------------------------------
/bastion-highavailability/vpc.tf:
--------------------------------------------------------------------------------
1 | resource "aws_vpc" "default" {
2 | cidr_block = "${var.vpc_cidr}"
3 | enable_dns_hostnames = true
4 |
5 | tags {
6 | Name = "testing"
7 | }
8 | }
9 |
10 | resource "aws_subnet" "us-east-1a-public" {
11 | vpc_id = "${aws_vpc.default.id}"
12 | cidr_block = "${var.us_east_1a_public_cidr}"
13 | availability_zone = "${lookup(var.availability_zones, var.us_east_1a_public_cidr)}"
14 |
15 | tags {
16 | Name = "us-east-1a-public"
17 | }
18 | }
19 |
20 | resource "aws_subnet" "us-east-1b-public" {
21 | vpc_id = "${aws_vpc.default.id}"
22 | cidr_block = "${var.us_east_1b_public_cidr}"
23 | availability_zone = "${lookup(var.availability_zones, var.us_east_1b_public_cidr)}"
24 |
25 | tags {
26 | Name = "us-east-1b-public"
27 | }
28 | }
29 |
30 | resource "aws_subnet" "us-east-1a-private" {
31 | vpc_id = "${aws_vpc.default.id}"
32 | cidr_block = "${var.us_east_1a_private_cidr}"
33 | availability_zone = "${lookup(var.availability_zones, var.us_east_1a_private_cidr)}"
34 |
35 | tags {
36 | Name = "us-east-1a-private"
37 | }
38 | }
39 |
40 | resource "aws_internet_gateway" "default" {
41 | vpc_id = "${aws_vpc.default.id}"
42 |
43 | tags {
44 | Name = "igw"
45 | }
46 | }
47 |
48 | resource "aws_route_table" "default" {
49 | vpc_id = "${aws_vpc.default.id}"
50 |
51 | route {
52 | cidr_block = "0.0.0.0/0"
53 | gateway_id = "${aws_internet_gateway.default.id}"
54 | }
55 |
56 | tags {
57 | Name = "public-rt"
58 | }
59 | }
60 |
61 | resource "aws_route_table_association" "1a-public-rt" {
62 | subnet_id = "${aws_subnet.us-east-1a-public.id}"
63 | route_table_id = "${aws_route_table.default.id}"
64 | }
65 |
66 | resource "aws_route_table_association" "1b-public-rt" {
67 | subnet_id = "${aws_subnet.us-east-1b-public.id}"
68 | route_table_id = "${aws_route_table.default.id}"
69 | }
70 |
--------------------------------------------------------------------------------
/ci-pipeline/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mlabouardy/terraform-aws-labs/945a5242405f9faba893fd180f27d82da5ff1c12/ci-pipeline/README.md
--------------------------------------------------------------------------------
/ci-pipeline/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | access_key = "${var.access_key}"
4 | secret_key = "${var.secret_key}"
5 | }
6 |
7 | // SSH KeyPair
8 | resource "aws_key_pair" "default" {
9 | key_name = "registry"
10 | public_key = "${file("${var.ssh_public_key}")}"
11 | }
12 |
13 | // Jenkins Master
14 | resource "aws_instance" "ci-master" {
15 | ami = "${lookup(var.amis, var.region)}" # amis map declared at the end of this file
16 | instance_type = "${var.instance_type}"
17 | key_name = "${aws_key_pair.default.id}"
18 | security_groups = ["${aws_security_group.default.name}"]
19 |
20 | user_data = "${file("setup.sh")}"
21 |
22 | tags {
23 | Name = "ci-master"
24 | }
25 | }
26 |
27 | // Jenkins Slave
28 | resource "aws_instance" "ci-slave" {
29 | ami = "${lookup(var.amis, var.region)}"
30 | instance_type = "${var.instance_type}"
31 | key_name = "${aws_key_pair.default.id}"
32 |
33 | tags {
34 | Name = "ci-slave"
35 | }
36 | }
37 |
38 | // Deployment Environment
39 | resource "aws_instance" "node" {
40 | count = 3
41 | ami = "${lookup(var.amis, var.region)}"
42 | instance_type = "${var.instance_type}"
43 | key_name = "${aws_key_pair.default.id}"
44 |
45 | tags {
46 | Name = "node-${count.index}"
47 | }
48 | }
49 |
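50 | // NOTE: the variables and the security group referenced above are not
51 | // declared anywhere in this folder. A minimal sketch of the missing pieces
52 | // follows; the names mirror the other labs in this repo, while the AMI map
53 | // and the Jenkins port (8080) are assumptions.
54 | 
55 | variable "region" {}
56 | variable "access_key" {}
57 | variable "secret_key" {}
58 | variable "ssh_public_key" {}
59 | 
60 | variable "instance_type" {
61 |   default = "t2.micro"
62 | }
63 | 
64 | variable "amis" {
65 |   type        = "map"
66 |   description = "Amazon Linux AMIs by region"
67 | 
68 |   default = {
69 |     "us-east-1" = "ami-4fffc834"
70 |   }
71 | }
72 | 
73 | resource "aws_security_group" "default" {
74 |   name        = "ci_security_group"
75 |   description = "Allow SSH and Jenkins dashboard access"
76 | 
77 |   ingress {
78 |     from_port   = 22
79 |     to_port     = 22
80 |     protocol    = "tcp"
81 |     cidr_blocks = ["0.0.0.0/0"]
82 |   }
83 | 
84 |   ingress {
85 |     from_port   = 8080
86 |     to_port     = 8080
87 |     protocol    = "tcp"
88 |     cidr_blocks = ["0.0.0.0/0"]
89 |   }
90 | 
91 |   egress {
92 |     from_port   = 0
93 |     to_port     = 0
94 |     protocol    = "-1"
95 |     cidr_blocks = ["0.0.0.0/0"]
96 |   }
97 | }
98 | 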
--------------------------------------------------------------------------------
/ci-pipeline/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum update -y
3 | yum install -y docker
4 | service docker start
5 | usermod -aG docker ec2-user
6 |
--------------------------------------------------------------------------------
/docker-registry/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | secret_key = "${var.secret_key}"
4 | access_key = "${var.access_key}"
5 | }
6 |
7 | resource "aws_security_group" "default" {
8 | name = "registry_security_group"
9 | description = "Allow access to Nexus dashboard & traffic on port 5000"
10 |
11 | ingress {
12 | from_port = 22
13 | to_port = 22
14 | protocol = "tcp"
15 | cidr_blocks = ["0.0.0.0/0"]
16 | }
17 |
18 | ingress {
19 | from_port = 8081
20 | to_port = 8081
21 | protocol = "tcp"
22 | cidr_blocks = ["0.0.0.0/0"]
23 | }
24 |
25 | ingress {
26 | from_port = 5000
27 | to_port = 5000
28 | protocol = "tcp"
29 | cidr_blocks = ["0.0.0.0/0"]
30 | }
31 |
32 | egress {
33 | from_port = 0
34 | to_port = 0
35 | protocol = "-1"
36 | cidr_blocks = ["0.0.0.0/0"]
37 | }
38 |
39 | tags {
40 | Name = "registry_security_group"
41 | }
42 | }
43 |
44 | resource "aws_key_pair" "default" {
45 | key_name = "registry"
46 | public_key = "${file("${var.ssh_public_key}")}"
47 | }
48 |
49 | resource "aws_eip" "default" {
50 | instance = "${aws_instance.default.id}"
51 | vpc = true
52 | }
53 |
54 | resource "aws_instance" "default" {
55 | ami = "${lookup(var.amis, var.region)}"
56 | instance_type = "${var.instance_type}"
57 | key_name = "${aws_key_pair.default.id}"
58 | security_groups = ["${aws_security_group.default.name}"]
59 |
60 | user_data = "${file("setup.sh")}"
61 |
62 | tags {
63 | Name = "registry"
64 | }
65 | }
66 |
67 | resource "aws_route53_record" "default" {
68 | zone_id = "${var.dns_zone_id}"
69 | name = "${var.dns_name}"
70 | type = "A"
71 | ttl = "300"
72 | records = ["${aws_eip.default.public_ip}"]
73 | }
74 |
--------------------------------------------------------------------------------
/docker-registry/outputs.tf:
--------------------------------------------------------------------------------
1 | output "Registry_IP" {
2 | value = "${aws_eip.default.public_ip}"
3 | }
4 |
--------------------------------------------------------------------------------
/docker-registry/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum update -y
3 | yum install -y docker
4 | service docker start
5 | usermod -aG docker ec2-user
6 | docker swarm init
7 | docker service create --replicas 1 --name registry --publish 5000:5000 --publish 8081:8081 sonatype/nexus3:3.6.2
8 |
--------------------------------------------------------------------------------
/docker-registry/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 | variable "secret_key" {}
3 | variable "access_key" {}
4 | variable "ssh_public_key" {}
5 | variable "dns_zone_id" {}
6 |
7 | variable "instance_type" {
8 | default = "t2.medium"
9 | }
10 |
11 | variable "amis" {
12 | type = "map"
13 | description = "Amazon Linux Image"
14 |
15 | default = {
16 | "us-east-1" = "ami-55ef662f"
17 | "us-east-2" = "ami-15e9c770"
18 | "eu-west-2" = "ami-e7d6c983"
19 | "eu-west-1" = "ami-1a962263"
20 | "ap-south-1" = "ami-d5c18eba"
21 | }
22 | }
23 |
24 | variable "dns_name" {
25 | default = "registry.slowcoder.com"
26 | }
27 |
--------------------------------------------------------------------------------
/docker-registry/variables.tfvars:
--------------------------------------------------------------------------------
1 | region = "YOUR AWS REGION"
2 | secret_key = "YOUR AWS SECRET KEY"
3 | access_key = "YOUR AWS ACCESS KEY ID"
4 | ssh_public_key = "YOUR SSH PUBLIC KEY (.pub)"
5 | dns_zone_id = "YOUR ROUTE53 DNS ZONE ID"
6 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | How to set up a Docker Swarm cluster is shown below:
6 |
7 | [](https://asciinema.org/a/135278)
8 |
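9 | A rough workflow (a sketch; the private key below is an assumption, the private half of the default key_path in variables.tf):
10 | 
11 | ```
12 | $ terraform apply
13 | # copy the MASTER IP into the [masters] group and the WORKER1/WORKER2 IPs
14 | # into the [workers] group of the hosts file, then:
15 | $ ansible-playbook -i hosts --private-key=/home/core/.ssh/id_rsa playbook.yml
16 | ```
17 | 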
--------------------------------------------------------------------------------
/docker-swarm-cluster/hosts:
--------------------------------------------------------------------------------
1 | [masters]
2 |
3 | [workers]
4 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/install-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum update -y
3 | yum install -y docker
4 | service docker start
5 | usermod -aG docker ec2-user
6 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/outputs.tf:
--------------------------------------------------------------------------------
1 | output "MASTER" {
2 | value = "${aws_instance.master.public_ip}"
3 | }
4 |
5 | output "WORKER1" {
6 | value = "${aws_instance.worker1.public_ip}"
7 | }
8 |
9 | output "WORKER2" {
10 | value = "${aws_instance.worker2.public_ip}"
11 | }
12 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Init Swarm Master
3 | hosts: masters
4 | gather_facts: False
5 | remote_user: ec2-user
6 | tasks:
7 | - name: Swarm Init
8 | command: docker swarm init --advertise-addr {{ inventory_hostname }}
9 |
10 | - name: Get Worker Token
11 | command: docker swarm join-token worker -q
12 | register: worker_token
13 |
14 | - name: Show Worker Token
15 | debug: var=worker_token.stdout
16 |
17 | - name: Master Token
18 | command: docker swarm join-token manager -q
19 | register: master_token
20 |
21 | - name: Show Master Token
22 | debug: var=master_token.stdout
23 |
24 | - name: Join Swarm Cluster
25 | hosts: workers
26 | remote_user: ec2-user
27 | gather_facts: False
28 | vars:
29 | token: "{{ hostvars[groups['masters'][0]]['worker_token']['stdout'] }}"
30 | master: "{{ hostvars[groups['masters'][0]]['inventory_hostname'] }}"
31 | tasks:
32 | - name: Join Swarm Cluster as a Worker
33 | command: docker swarm join --token {{ token }} {{ master }}:2377
34 | register: worker
35 |
36 | - name: Show Results
37 | debug: var=worker.stdout
38 |
39 | - name: Show Errors
40 | debug: var=worker.stderr
41 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.aws_region}"
3 | }
4 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/resources.tf:
--------------------------------------------------------------------------------
1 | resource "aws_key_pair" "default"{
2 | key_name = "clusterkp"
3 | public_key = "${file("${var.key_path}")}"
4 | }
5 |
6 | resource "aws_instance" "master" {
7 | ami = "${var.ami}"
8 | instance_type = "${var.instance_type}"
9 | key_name = "${aws_key_pair.default.id}"
10 | user_data = "${file("${var.bootstrap_path}")}"
11 | vpc_security_group_ids = ["${aws_security_group.default.id}"]
12 |
13 | tags {
14 | Name = "master"
15 | }
16 | }
17 |
18 | resource "aws_instance" "worker1" {
19 | ami = "${var.ami}"
20 | instance_type = "${var.instance_type}"
21 | key_name = "${aws_key_pair.default.id}"
22 | user_data = "${file("${var.bootstrap_path}")}"
23 | vpc_security_group_ids = ["${aws_security_group.default.id}"]
24 |
25 | tags {
26 | Name = "worker 1"
27 | }
28 | }
29 |
30 | resource "aws_instance" "worker2" {
31 | ami = "${var.ami}"
32 | instance_type = "${var.instance_type}"
33 | key_name = "${aws_key_pair.default.id}"
34 | user_data = "${file("${var.bootstrap_path}")}"
35 | vpc_security_group_ids = ["${aws_security_group.default.id}"]
36 |
37 | tags {
38 | Name = "worker 2"
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/security_groups.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "default" {
2 | name = "sgswarmcluster"
3 |
4 | # Allow all inbound
5 | ingress {
6 | from_port = 0
7 | to_port = 65535
8 | protocol = "tcp"
9 | cidr_blocks = ["0.0.0.0/0"]
10 | }
11 |
12 | egress {
13 | from_port = 0
14 | to_port = 65535
15 | protocol = "tcp"
16 | cidr_blocks = ["0.0.0.0/0"]
17 | }
18 |
19 | # Enable ICMP
20 | ingress {
21 | from_port = -1
22 | to_port = -1
23 | protocol = "icmp"
24 | cidr_blocks = ["0.0.0.0/0"]
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/docker-swarm-cluster/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_region" {
2 | description = "AWS region on which we will setup the swarm cluster"
3 | default = "us-east-1"
4 | }
5 |
6 | variable "ami" {
7 | description = "Amazon Linux AMI"
8 | default = "ami-4fffc834"
9 | }
10 |
11 | variable "instance_type" {
12 | description = "Instance type"
13 | default = "t2.micro"
14 | }
15 |
16 | variable "key_path" {
17 | description = "SSH Public Key path"
18 | default = "/home/core/.ssh/id_rsa.pub"
19 | }
20 |
21 | variable "bootstrap_path" {
22 | description = "Script to install Docker Engine"
23 | default = "install-docker.sh"
24 | }
25 |
--------------------------------------------------------------------------------
/ec2-elb/bootstrap-server1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum install -y httpd
3 | service httpd start
4 | chkconfig httpd on
5 | echo "hello world server 1" > /var/www/html/index.html
6 |
--------------------------------------------------------------------------------
/ec2-elb/bootstrap-server2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum install -y httpd
3 | service httpd start
4 | chkconfig httpd on
5 | echo "hello world server 2" > /var/www/html/index.html
6 |
--------------------------------------------------------------------------------
/ec2-elb/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | }
4 |
5 | resource "aws_security_group" "default" {
6 | name = "ec2-elb-sg"
7 |
8 | ingress {
9 | from_port = 0
10 | to_port = 65535
11 | protocol = "tcp"
12 | cidr_blocks = ["0.0.0.0/0"]
13 | }
14 |
15 | egress {
16 | from_port = 0
17 | to_port = 65535
18 | protocol = "tcp"
19 | cidr_blocks = ["0.0.0.0/0"]
20 | }
21 |
22 | ingress {
23 | from_port = -1
24 | to_port = -1
25 | protocol = "icmp"
26 | cidr_blocks = ["0.0.0.0/0"]
27 | }
28 | }
29 |
30 | resource "aws_key_pair" "default" {
31 | key_name = "ec2-elb-key"
32 | public_key = "${file("${var.key_path}")}"
33 | }
34 |
35 | resource "aws_instance" "server1" {
36 | ami = "${var.ami}"
37 | instance_type = "${var.instance_type}"
38 | key_name = "${aws_key_pair.default.id}"
39 | security_groups = ["${aws_security_group.default.name}"]
40 | user_data = "${file("bootstrap-server1.sh")}"
41 |
42 | tags {
43 | Name = "server1"
44 | }
45 | }
46 |
47 | resource "aws_instance" "server2" {
48 | ami = "${var.ami}"
49 | instance_type = "${var.instance_type}"
50 | key_name = "${aws_key_pair.default.id}"
51 | security_groups = ["${aws_security_group.default.name}"]
52 | user_data = "${file("bootstrap-server2.sh")}"
53 |
54 | tags {
55 | Name = "server2"
56 | }
57 | }
58 |
59 | resource "aws_elb" "default" {
60 | name = "ec2-elb"
61 | instances = ["${aws_instance.server1.id}", "${aws_instance.server2.id}"]
62 | availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
63 |
64 | listener {
65 | instance_port = 80
66 | instance_protocol = "tcp"
67 | lb_port = 80
68 | lb_protocol = "tcp"
69 | }
70 |
71 | health_check {
72 | target = "HTTP:80/"
73 | healthy_threshold = 2
74 | unhealthy_threshold = 2
75 | interval = 30
76 | timeout = 5
77 | }
78 |
79 | tags {
80 | Name = "ec2-elb"
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/ec2-elb/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "AWS Region"
3 | default = "us-east-1"
4 | }
5 |
6 | variable "key_path" {
7 | description = "Public key path"
8 | default = "/root/.ssh/id_rsa.pub"
9 | }
10 |
11 | variable "ami" {
12 | description = "AMI"
13 | default = "ami-4fffc834"
14 | }
15 |
16 | variable "instance_type" {
17 | description = "EC2 instance type"
18 | default = "t2.micro"
19 | }
20 |
--------------------------------------------------------------------------------
/etcd-cluster/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | How to set up an etcd cluster on AWS is shown below:
6 |
7 | [](https://asciinema.org/a/135407)
8 |
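9 | The discovery URL hard-coded in cloud-config.yml is a one-shot token; generate a fresh one and paste it into cloud-config.yml before `terraform apply` (size 3 matches the three nodes in resources.tf):
10 | 
11 | ```
12 | $ curl -w "\n" "https://discovery.etcd.io/new?size=3"
13 | ```
14 | 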
--------------------------------------------------------------------------------
/etcd-cluster/cloud-config.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | write_files:
4 | - path: /tmp/done
5 | owner: core:core
6 | permissions: 0644
7 | content: |
8 | Cloud config has been provisioned
9 |
10 | coreos:
11 | etcd2:
12 | discovery: "https://discovery.etcd.io/b0d368f43a40eeb76b98efea9bb4055c"
13 | advertise-client-urls: "http://$private_ipv4:2379"
14 | initial-advertise-peer-urls: "http://$private_ipv4:2380"
15 | listen-client-urls: "http://$private_ipv4:2379,http://127.0.0.1:2379"
16 | listen-peer-urls: "http://$private_ipv4:2380"
17 | update:
18 | reboot-strategy: "off"
19 | units:
20 | - name: etcd2.service
21 | command: start
22 |
--------------------------------------------------------------------------------
/etcd-cluster/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | }
4 |
--------------------------------------------------------------------------------
/etcd-cluster/resources.tf:
--------------------------------------------------------------------------------
1 | resource "aws_key_pair" "default" {
2 | key_name = "etcdcluster"
3 | public_key = "${file("${var.key_path}")}"
4 | }
5 |
6 | resource "aws_instance" "node1" {
7 | ami = "${var.ami}"
8 | instance_type = "${var.instance_type}"
9 | key_name = "${aws_key_pair.default.id}"
10 | vpc_security_group_ids = ["${aws_security_group.default.id}"]
11 | user_data = "${file("cloud-config.yml")}"
12 |
13 | tags {
14 | Name = "node1"
15 | }
16 | }
17 |
18 | resource "aws_instance" "node2" {
19 | ami = "${var.ami}"
20 | instance_type = "${var.instance_type}"
21 | key_name = "${aws_key_pair.default.id}"
22 | vpc_security_group_ids = ["${aws_security_group.default.id}"]
23 | user_data = "${file("cloud-config.yml")}"
24 |
25 | tags {
26 | Name = "node2"
27 | }
28 | }
29 |
30 | resource "aws_instance" "node3" {
31 | ami = "${var.ami}"
32 | instance_type = "${var.instance_type}"
33 | key_name = "${aws_key_pair.default.id}"
34 | vpc_security_group_ids = ["${aws_security_group.default.id}"]
35 | user_data = "${file("cloud-config.yml")}"
36 |
37 | tags {
38 | Name = "node3"
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/etcd-cluster/security_groups.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "default" {
2 | name = "etcdclustersg"
3 |
4 | ingress {
5 | from_port = 0
6 | to_port = 65535
7 | protocol = "tcp"
8 | cidr_blocks = ["0.0.0.0/0"]
9 | }
10 |
11 | egress {
12 | from_port = 0
13 | to_port = 65535
14 | protocol = "tcp"
15 | cidr_blocks = ["0.0.0.0/0"]
16 | }
17 |
18 | ingress {
19 | from_port = -1
20 | to_port = -1
21 | protocol = "icmp"
22 | cidr_blocks = ["0.0.0.0/0"]
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/etcd-cluster/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "AWS Region"
3 | default = "us-east-1"
4 | }
5 |
6 | variable "ami" {
7 | description = "CoreOS AMI"
8 | default = "ami-38714c43"
9 | }
10 |
11 | variable "instance_type" {
12 | description = "Instance type"
13 | default = "t1.micro"
14 | }
15 |
16 | variable "key_path" {
17 | description = "SSH public key path"
18 | default = "/home/core/.ssh/id_rsa.pub"
19 | }
20 |
--------------------------------------------------------------------------------
/linuxkit-aws/aws.yml:
--------------------------------------------------------------------------------
1 | kernel:
2 | image: linuxkit/kernel:4.9.39
3 | cmdline: "console=ttyS0"
4 | init:
5 | - linuxkit/init:838b772355a8690143b37de1cdd4ac5db725271f
6 | - linuxkit/runc:d5cbeb95bdafedb82ad2cf11cff1a5da7fcae630
7 | - linuxkit/containerd:e33e0534d6fca88e1eb86897a1ea410b4a5d722e
8 | - linuxkit/ca-certificates:67acf038c44bb191ebb704ec7bb39a1524052cdf
9 | onboot:
10 | - name: sysctl
11 | image: linuxkit/sysctl:d1a43c7c91e92374766f962dc8534cf9508756b0
12 | - name: dhcpcd
13 | image: linuxkit/dhcpcd:17423c1ccced74e3c005fd80486e8177841fe02b
14 | command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
15 | - name: metadata
16 | image: linuxkit/metadata:f5d4299909b159db35f72547e4ae70bd76c42c6c
17 | services:
18 | - name: rngd
19 | image: linuxkit/rngd:1516d5d70683a5d925fe475eb1b6164a2f67ac3b
20 | - name: sshd
21 | image: linuxkit/sshd:5dc5c3c4470c85f6c89f0e26b9d477ae4ff85a3c
22 | binds:
23 | - /var/config/ssh/authorized_keys:/root/.ssh/authorized_keys
24 | trust:
25 | org:
26 | - linuxkit
27 | - library
28 |
--------------------------------------------------------------------------------
/linuxkit-aws/files/assume-role-policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "Principal": { "Service": "vmie.amazonaws.com" },
7 | "Action": "sts:AssumeRole",
8 | "Condition": {
9 | "StringEquals":{
10 | "sts:Externalid": "vmimport"
11 | }
12 | }
13 | }
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/linuxkit-aws/files/policy.tpl:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Effect": "Allow",
6 | "Action": [
7 | "s3:ListBucket",
8 | "s3:GetBucketLocation"
9 | ],
10 | "Resource": [
11 | "arn:aws:s3:::${bucket}"
12 | ]
13 | },
14 | {
15 | "Effect": "Allow",
16 | "Action": [
17 | "s3:GetObject"
18 | ],
19 | "Resource": [
20 | "arn:aws:s3:::${bucket}/*"
21 | ]
22 | },
23 | {
24 | "Effect": "Allow",
25 | "Action":[
26 | "ec2:ModifySnapshotAttribute",
27 | "ec2:CopySnapshot",
28 | "ec2:RegisterImage",
29 | "ec2:Describe*"
30 | ],
31 | "Resource": "*"
32 | }
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/linuxkit-aws/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 | data "template_file" "policy" {
6 | template = "${file("files/policy.tpl")}"
7 | vars {
8 | bucket = "${aws_s3_bucket.disk_image_bucket.id}"
9 | }
10 | }
11 |
12 | ################## S3 ###################
13 |
14 | resource "aws_s3_bucket" "disk_image_bucket" {
15 | bucket_prefix = "vmimport"
16 | }
17 |
18 | ################## IAM ##################
19 |
20 | resource "aws_iam_role" "vmimport" {
21 | name = "vmimport"
22 | assume_role_policy = "${file("files/assume-role-policy.json")}"
23 | }
24 |
25 |
26 | resource "aws_iam_role_policy" "import_disk_image" {
27 | name = "import_disk_image"
28 | role = "${aws_iam_role.vmimport.name}"
29 | policy = "${data.template_file.policy.rendered}"
30 | }
31 |
--------------------------------------------------------------------------------
/single-ec2-instance/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/single-ec2-instance/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 | resource "aws_key_pair" "mysshkey" {
6 | key_name = "mysshkey"
7 | public_key = "${file("/home/core/.ssh/id_rsa.pub")}"
8 | }
9 |
10 | resource "aws_instance" "node1" {
11 | ami = "ami-a4c7edb2"
12 | instance_type = "t2.micro"
13 | key_name = "mysshkey"
14 |
15 | tags {
16 | Name = "node1"
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/telegraf-influxdb-grafana/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum update -y
3 | yum install -y docker curl
4 | service docker start
5 | usermod -aG docker ec2-user
6 | curl -L https://github.com/docker/compose/releases/download/1.15.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
7 | chmod +x /usr/local/bin/docker-compose
8 |
--------------------------------------------------------------------------------
/telegraf-influxdb-grafana/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | }
4 |
5 | resource "aws_security_group" "default"{
6 | name = "metricssg"
7 |
8 | ingress {
9 | from_port = 0
10 | to_port = 65535
11 | protocol = "tcp"
12 | cidr_blocks = ["0.0.0.0/0"]
13 | }
14 |
15 | egress {
16 | from_port = 0
17 | to_port = 65535
18 | protocol = "tcp"
19 | cidr_blocks = ["0.0.0.0/0"]
20 | }
21 |
22 | ingress {
23 | from_port = -1
24 | to_port = -1
25 | protocol = "icmp"
26 | cidr_blocks = ["0.0.0.0/0"]
27 | }
28 | }
29 |
30 | resource "aws_key_pair" "default" {
31 | key_name = "metricskp"
32 | public_key = "${file("${var.key_path}/id_rsa.pub")}"
33 | }
34 |
35 | resource "aws_instance" "default" {
36 | ami = "${var.ami}"
37 | instance_type = "${var.instance_type}"
38 | key_name = "${aws_key_pair.default.id}"
39 | security_groups = ["${aws_security_group.default.name}"]
40 | user_data = "${file("${var.bootstrap_script}")}"
41 |
42 | tags {
43 | Name = "athena"
44 | }
45 |
46 | provisioner "file" {
47 | source = "telegraf-influxdb-grafana/"
48 | destination = "/home/ec2-user/"
49 |
50 | connection {
51 | type = "ssh"
52 | user = "ec2-user"
53 | private_key = "${file("${var.key_path}/id_rsa")}"
54 | }
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/telegraf-influxdb-grafana/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "AWS Region"
3 | default = "us-east-1"
4 | }
5 |
6 | variable "ami" {
7 | description = "Amazon Linux Image"
8 | default = "ami-4fffc834"
9 | }
10 |
11 | variable "instance_type" {
12 | description = "Instance type"
13 | default = "t2.micro"
14 | }
15 |
16 | variable "key_path" {
17 | description = "SSH key path"
18 | default = "/home/core/.ssh/"
19 | }
20 |
21 | variable "bootstrap_script" {
22 | description = "Shell script to install docker & compose"
23 | default = "bootstrap.sh"
24 | }
25 |
--------------------------------------------------------------------------------
/tick-stack-ansible/README.md:
--------------------------------------------------------------------------------
1 | # TICK Stack
2 |
3 | * Telegraf
4 | * InfluxDB
5 | * Chronograf
6 | * Kapacitor
7 |
8 |
9 |
10 |
11 |
12 | # How to use
13 |
14 | ## Terraform
15 |
16 | * Update variables.tfvars with your own AWS credentials
17 | * Install AWS plugin:
18 |
19 | ```
20 | $ terraform init
21 | ```
22 |
23 | * Create the AWS resources:
24 |
25 | ```
26 | $ terraform apply -var-file=variables.tfvars
27 | ```
28 |
29 | ## Ansible
30 |
31 | * Install Role:
32 |
33 | ```
34 | $ ansible-galaxy install mlabouardy.tick
35 | ```
36 |
37 | * Execute playbook:
38 |
39 | ```
40 | $ ansible-playbook --private-key=aws.pem -i inventory playbook.yml
41 | ```
42 |
--------------------------------------------------------------------------------
/tick-stack-ansible/ansible/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | remote_user: ubuntu
3 |
--------------------------------------------------------------------------------
/tick-stack-ansible/ansible/inventory:
--------------------------------------------------------------------------------
1 | [servers]
2 |
--------------------------------------------------------------------------------
/tick-stack-ansible/ansible/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup TICK Stack
3 | hosts: servers
4 | remote_user: "{{remote_user}}"
5 | become: yes
6 | become_method: sudo
7 | roles:
8 | - mlabouardy.tick
9 |
--------------------------------------------------------------------------------
/tick-stack-ansible/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mlabouardy/terraform-aws-labs/945a5242405f9faba893fd180f27d82da5ff1c12/tick-stack-ansible/logo.png
--------------------------------------------------------------------------------
/tick-stack-ansible/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | access_key = "${var.access_key}"
4 | secret_key = "${var.secret_key}"
5 | }
6 |
7 |
8 | module "tick_sg" {
9 | source = "github.com/terraform-aws-modules/terraform-aws-security-group"
10 |
11 | name = "${var.sg_name}"
12 | description = "${var.sg_description}"
13 | vpc_id = "${var.vpc_id}"
14 |
15 | ingress_with_cidr_blocks = [
16 | {
17 | from_port = 8083
18 | to_port = 8083
19 | protocol = "tcp"
20 | description = "InfluxDB admin dashboard"
21 | cidr_blocks = "0.0.0.0/0"
22 | },
23 | {
24 | from_port = 8086
25 | to_port = 8086
26 | protocol = "tcp"
27 | description = "InfluxDB API"
28 | cidr_blocks = "0.0.0.0/0"
29 | },
30 | {
31 | from_port = 8888
32 | to_port = 8888
33 | protocol = "tcp"
34 | description = "Chronograf Dashboard"
35 | cidr_blocks = "0.0.0.0/0"
36 | },
37 | {
38 | from_port = 22
39 | to_port = 22
40 | protocol = "tcp"
41 | description = "SSH access"
42 | cidr_blocks = "0.0.0.0/0"
43 | },
44 | ]
45 |
46 | egress_with_cidr_blocks = [
47 | {
48 | from_port = 0
49 | to_port = 65535
50 | protocol = "tcp"
51 | description = "Allow all outbound traffic"
52 | cidr_blocks = "0.0.0.0/0"
53 | },
54 | ]
55 | }
56 |
57 | module "tick_stack" {
58 | source = "github.com/terraform-aws-modules/terraform-aws-ec2-instance"
59 |
60 | name = "${var.hostname}"
61 | ami = "${var.ami}"
62 |
63 | key_name = "${var.key_name}"
64 | instance_type = "${var.instance_type}"
65 | vpc_security_group_ids = ["${module.tick_sg.this_security_group_id}"]
66 |
67 | tags {
68 | Name = "${var.hostname}"
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/tick-stack-ansible/terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "instance_public_dns" {
2 | value = "${module.tick_stack.public_dns[0]}"
3 | }
4 |
--------------------------------------------------------------------------------
/tick-stack-ansible/terraform/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "AWS Region"
3 | }
4 |
5 | variable "access_key" {
6 | description = "AWS Access Key ID"
7 | }
8 |
9 | variable "secret_key" {
10 | description = "AWS Secret Key"
11 | }
12 |
13 | variable "key_name" {
14 | description = "SSH KeyPair"
15 | }
16 |
17 | variable "vpc_id" {
18 | description = "ID of the VPC where to create security group"
19 | }
20 |
21 | variable "hostname" {
22 | description = "EC2 hostname"
23 | default = "tick_stack"
24 | }
25 |
26 | variable "ami" {
27 | description = "Ubuntu Server 16.04 LTS"
28 | default = "ami-da05a4a0"
29 | }
30 |
31 | variable "instance_type" {
32 | description = "EC2 Instance Type"
33 | default = "t2.micro"
34 | }
35 |
36 | variable "sg_name" {
37 | description = "Security Group name"
38 | default = "tick_sg"
39 | }
40 |
41 | variable "sg_description" {
42 | description = "SG description"
43 | default = "Allow InfluxDB, Chronograf & SSH access"
44 | }
45 |
--------------------------------------------------------------------------------
/tick-stack-ansible/terraform/variables.tfvars:
--------------------------------------------------------------------------------
1 | region = "AWS REGION"
2 | access_key = "YOUR AWS ACCESS KEY ID"
3 | secret_key = "YOUR AWS SECRET KEY"
4 | key_name = "YOUR SSH KEY PAIR"
5 | vpc_id = "YOUR VPC ID"
6 |
--------------------------------------------------------------------------------
/vpc-public-private-subnet/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | A demo is shown below:
6 |
7 | [](https://asciinema.org/a/134951)
8 |
--------------------------------------------------------------------------------
/vpc-public-private-subnet/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum install -y httpd
3 | service httpd start
4 | chkconfig httpd on
5 | echo "Hello from mlabouardy ^^" > /var/www/html/index.html
6 |
--------------------------------------------------------------------------------
/vpc-public-private-subnet/provider.tf:
--------------------------------------------------------------------------------
1 | # Define AWS as our provider
2 | provider "aws" {
3 | region = "${var.aws_region}"
4 | }
5 |
--------------------------------------------------------------------------------
/vpc-public-private-subnet/resources.tf:
--------------------------------------------------------------------------------
1 | # Define SSH key pair for our instances
2 | resource "aws_key_pair" "default" {
3 | key_name = "vpctestkeypair"
4 | public_key = "${file("${var.key_path}")}"
5 | }
6 |
7 | # Define webserver inside the public subnet
8 | resource "aws_instance" "wb" {
9 | ami = "${var.ami}"
10 | instance_type = "t1.micro"
11 | key_name = "${aws_key_pair.default.id}"
12 | subnet_id = "${aws_subnet.public-subnet.id}"
13 | vpc_security_group_ids = ["${aws_security_group.sgweb.id}"]
14 | associate_public_ip_address = true
15 | source_dest_check = false
16 | user_data = "${file("install.sh")}"
17 |
18 | tags {
19 | Name = "webserver"
20 | }
21 | }
22 |
23 | # Define database inside the private subnet
24 | resource "aws_instance" "db" {
25 | ami = "${var.ami}"
26 | instance_type = "t1.micro"
27 | key_name = "${aws_key_pair.default.id}"
28 | subnet_id = "${aws_subnet.private-subnet.id}"
29 | vpc_security_group_ids = ["${aws_security_group.sgdb.id}"]
30 | source_dest_check = false
31 |
32 | tags {
33 | Name = "database"
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/vpc-public-private-subnet/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_region" {
2 | description = "Region for the VPC"
3 | default = "us-east-1"
4 | }
5 |
6 | variable "vpc_cidr" {
7 | description = "CIDR for the VPC"
8 | default = "10.0.0.0/16"
9 | }
10 |
11 | variable "public_subnet_cidr" {
12 | description = "CIDR for the public subnet"
13 | default = "10.0.1.0/24"
14 | }
15 |
16 | variable "private_subnet_cidr" {
17 | description = "CIDR for the private subnet"
18 | default = "10.0.2.0/24"
19 | }
20 |
21 | variable "ami" {
22 | description = "AMI for EC2"
23 | default = "ami-4fffc834"
24 | }
25 |
26 | variable "key_path" {
27 | description = "SSH Public Key path"
28 | default = "/home/core/.ssh/id_rsa.pub"
29 | }
30 |
--------------------------------------------------------------------------------
/vpc-public-private-subnet/vpc.tf:
--------------------------------------------------------------------------------
1 | # Define our VPC
2 | resource "aws_vpc" "default" {
3 | cidr_block = "${var.vpc_cidr}"
4 | enable_dns_hostnames = true
5 |
6 | tags {
7 | Name = "test-vpc"
8 | }
9 | }
10 |
11 | # Define the public subnet
12 | resource "aws_subnet" "public-subnet" {
13 | vpc_id = "${aws_vpc.default.id}"
14 | cidr_block = "${var.public_subnet_cidr}"
15 | availability_zone = "us-east-1a"
16 |
17 | tags {
18 | Name = "Web Public Subnet"
19 | }
20 | }
21 |
22 | # Define the private subnet
23 | resource "aws_subnet" "private-subnet" {
24 | vpc_id = "${aws_vpc.default.id}"
25 | cidr_block = "${var.private_subnet_cidr}"
26 | availability_zone = "us-east-1b"
27 |
28 | tags {
29 | Name = "Database Private Subnet"
30 | }
31 | }
32 |
33 | # Define the internet gateway
34 | resource "aws_internet_gateway" "gw" {
35 | vpc_id = "${aws_vpc.default.id}"
36 |
37 | tags {
38 | Name = "VPC IGW"
39 | }
40 | }
41 |
42 | # Define the route table
43 | resource "aws_route_table" "web-public-rt" {
44 | vpc_id = "${aws_vpc.default.id}"
45 |
46 | route {
47 | cidr_block = "0.0.0.0/0"
48 | gateway_id = "${aws_internet_gateway.gw.id}"
49 | }
50 |
51 | tags {
52 | Name = "Public Subnet RT"
53 | }
54 | }
55 |
56 | # Assign the route table to the public Subnet
57 | resource "aws_route_table_association" "web-public-rt" {
58 | subnet_id = "${aws_subnet.public-subnet.id}"
59 | route_table_id = "${aws_route_table.web-public-rt.id}"
60 | }
61 |
62 | # Define the security group for public subnet
63 | resource "aws_security_group" "sgweb" {
64 | name = "vpc_test_web"
65 | description = "Allow incoming HTTP connections & SSH access"
66 |
67 | ingress {
68 | from_port = 80
69 | to_port = 80
70 | protocol = "tcp"
71 | cidr_blocks = ["0.0.0.0/0"]
72 | }
73 |
74 | ingress {
75 | from_port = 443
76 | to_port = 443
77 | protocol = "tcp"
78 | cidr_blocks = ["0.0.0.0/0"]
79 | }
80 |
81 | ingress {
82 | from_port = -1
83 | to_port = -1
84 | protocol = "icmp"
85 | cidr_blocks = ["0.0.0.0/0"]
86 | }
87 |
88 | ingress {
89 | from_port = 22
90 | to_port = 22
91 | protocol = "tcp"
92 | cidr_blocks = ["0.0.0.0/0"]
93 | }
94 |
95 | vpc_id="${aws_vpc.default.id}"
96 |
97 | tags {
98 | Name = "Web Server SG"
99 | }
100 | }
101 |
102 | # Define the security group for private subnet
103 | resource "aws_security_group" "sgdb"{
104 | name = "sg_test_web"
105 | description = "Allow traffic from public subnet"
106 |
107 | ingress {
108 | from_port = 3306
109 | to_port = 3306
110 | protocol = "tcp"
111 | cidr_blocks = ["${var.public_subnet_cidr}"]
112 | }
113 |
114 | ingress {
115 | from_port = -1
116 | to_port = -1
117 | protocol = "icmp"
118 | cidr_blocks = ["${var.public_subnet_cidr}"]
119 | }
120 |
121 | ingress {
122 | from_port = 22
123 | to_port = 22
124 | protocol = "tcp"
125 | cidr_blocks = ["${var.public_subnet_cidr}"]
126 | }
127 |
128 | vpc_id = "${aws_vpc.default.id}"
129 |
130 | tags {
131 | Name = "DB SG"
132 | }
133 | }
134 |
--------------------------------------------------------------------------------
/wordpress/.gitignore:
--------------------------------------------------------------------------------
1 | terraform.tfvars
2 |
--------------------------------------------------------------------------------
/wordpress/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | yum install -y httpd php php-zlib php-iconv php-gd php-mbstring php-fileinfo php-curl php-mysql
3 | chkconfig httpd on
4 | cd /var/www
5 | wget www.wordpress.org/latest.zip
6 | unzip latest.zip
7 | rm latest.zip
8 | mv wordpress/* html/
9 | rm -r wordpress
10 | chown -R apache:apache html/
11 | apachectl start
12 |
--------------------------------------------------------------------------------
/wordpress/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "${var.region}"
3 | }
4 |
5 | resource "aws_vpc" "default" {
6 | cidr_block = "${var.vpc_cidr_block}"
7 | enable_dns_hostnames = true
8 |
9 | tags {
10 | Name = "vpc-blog"
11 | }
12 | }
13 |
14 | resource "aws_subnet" "public-subnet1" {
15 | cidr_block = "${var.public_subnet1_cidr_block}"
16 | vpc_id = "${aws_vpc.default.id}"
17 | availability_zone = "${var.public_subnet1_az}"
18 |
19 | tags {
20 | Name = "public-subnet-${var.public_subnet1_az}"
21 | }
22 | }
23 |
24 | resource "aws_subnet" "public-subnet2" {
25 | cidr_block = "${var.public_subnet2_cidr_block}"
26 | vpc_id = "${aws_vpc.default.id}"
27 | availability_zone = "${var.public_subnet2_az}"
28 |
29 | tags {
30 | Name = "public-subnet-${var.public_subnet2_az}"
31 | }
32 | }
33 |
34 | resource "aws_subnet" "private-subnet1" {
35 | cidr_block = "${var.private_subnet1_cidr_block}"
36 | vpc_id = "${aws_vpc.default.id}"
37 | availability_zone = "${var.private_subnet1_az}"
38 |
39 | tags {
40 | Name = "private-subnet-${var.private_subnet1_az}"
41 | }
42 | }
43 |
44 | resource "aws_subnet" "private-subnet2" {
45 | cidr_block = "${var.private_subnet2_cidr_block}"
46 | vpc_id = "${aws_vpc.default.id}"
47 | availability_zone = "${var.private_subnet2_az}"
48 |
49 | tags {
50 | Name = "private-subnet-${var.private_subnet2_az}"
51 | }
52 | }
53 |
54 | resource "aws_internet_gateway" "igw" {
55 | vpc_id = "${aws_vpc.default.id}"
56 |
57 | tags {
58 | Name = "WP Internet Gateway"
59 | }
60 | }
61 |
62 | resource "aws_route_table" "default" {
63 | vpc_id = "${aws_vpc.default.id}"
64 |
65 | route {
66 | cidr_block = "0.0.0.0/0"
67 | gateway_id = "${aws_internet_gateway.igw.id}"
68 | }
69 |
70 | tags {
71 | Name = "Route table for Public subnet"
72 | }
73 | }
74 |
75 | resource "aws_route_table_association" "rt-asso-public-subnet1" {
76 | subnet_id = "${aws_subnet.public-subnet1.id}"
77 | route_table_id = "${aws_route_table.default.id}"
78 | }
79 |
80 | resource "aws_route_table_association" "rt-asso-public-subnet2" {
81 | subnet_id = "${aws_subnet.public-subnet2.id}"
82 | route_table_id = "${aws_route_table.default.id}"
83 | }
84 |
85 | resource "aws_security_group" "wpsg" {
86 | name = "wpsg"
87 | description = "Allow Incoming HTTP traffic"
88 | vpc_id = "${aws_vpc.default.id}"
89 |
90 | ingress {
91 | from_port = 80
92 | to_port = 80
93 | protocol = "tcp"
94 | cidr_blocks = ["0.0.0.0/0"]
95 | }
96 |
97 | ingress {
98 | from_port = 22
99 | to_port = 22
100 | protocol = "tcp"
101 | cidr_blocks = ["0.0.0.0/0"]
102 | }
103 |
104 | egress {
105 | from_port = 0
106 | to_port = 0
107 | protocol = "-1"
108 | cidr_blocks = ["0.0.0.0/0"]
109 | }
110 |
111 | tags {
112 | Name = "blog-security-group"
113 | }
114 | }
115 |
116 | resource "aws_security_group" "elbsg" {
117 | name = "elbsg"
118 | description = "Allow Incoming HTTP traffic"
119 | vpc_id = "${aws_vpc.default.id}"
120 |
121 | ingress {
122 | from_port = 80
123 | to_port = 80
124 | protocol = "tcp"
125 | cidr_blocks = ["0.0.0.0/0"]
126 | }
127 |
128 | egress {
129 | from_port = 0
130 | to_port = 0
131 | protocol = "-1"
132 | cidr_blocks = ["0.0.0.0/0"]
133 | }
134 |
135 | tags {
136 | Name = "elb-security-group"
137 | }
138 | }
139 |
140 | resource "aws_security_group" "dbsg" {
141 | name = "dbsg"
142 | description = "Allow access to MySQL from WP"
143 | vpc_id = "${aws_vpc.default.id}"
144 |
145 | ingress {
146 | from_port = 3306
147 | to_port = 3306
148 | protocol = "tcp"
149 | security_groups = ["${aws_security_group.wpsg.id}"]
150 | }
151 |
152 | tags {
153 | Name = "db-security-group"
154 | }
155 | }
156 |
157 | resource "aws_key_pair" "default" {
158 | key_name = "blogkey"
159 | public_key = "${file("${var.key_path}")}"
160 | }
161 |
162 | resource "aws_instance" "wb1" {
163 | ami = "${var.ami}"
164 | instance_type = "${var.instance_type}"
165 | key_name = "${aws_key_pair.default.id}"
166 | user_data = "${file("bootstrap.sh")}"
167 | vpc_security_group_ids = ["${aws_security_group.wpsg.id}"]
168 | subnet_id = "${aws_subnet.public-subnet1.id}"
169 | associate_public_ip_address = true
170 |
171 | tags {
172 | Name = "wordpress-${var.public_subnet1_az}"
173 | }
174 | }
175 |
176 | resource "aws_instance" "wb2" {
177 | ami = "${var.ami}"
178 | instance_type = "${var.instance_type}"
179 | key_name = "${aws_key_pair.default.id}"
180 | user_data = "${file("bootstrap.sh")}"
181 | vpc_security_group_ids = ["${aws_security_group.wpsg.id}"]
182 | subnet_id = "${aws_subnet.public-subnet2.id}"
183 | associate_public_ip_address = true
184 |
185 | tags {
186 | Name = "wordpress-${var.public_subnet2_az}"
187 | }
188 | }
189 |
190 | resource "aws_db_subnet_group" "default" {
191 | name = "db-subnet-group"
192 | description = "RDS Subnet Group"
193 | subnet_ids = ["${aws_subnet.private-subnet1.id}", "${aws_subnet.private-subnet2.id}"]
194 |
195 | tags {
196 | Name = "DB Subnet Group"
197 | }
198 | }
199 |
200 | resource "aws_db_instance" "default" {
201 | name = "${var.db_name}"
202 | engine = "${var.engine}"
203 | engine_version = "5.6.35"
204 | storage_type = "gp2"
205 | allocated_storage = 5
206 | instance_class = "db.t2.micro"
207 | username = "${var.db_username}"
208 | password = "${var.db_password}"
209 | vpc_security_group_ids = ["${aws_security_group.dbsg.id}"]
210 | db_subnet_group_name = "${aws_db_subnet_group.default.id}"
211 | }
212 |
213 | resource "aws_elb" "default" {
214 | name = "elbwp"
215 | instances = ["${aws_instance.wb1.id}", "${aws_instance.wb2.id}"]
216 | subnets = ["${aws_subnet.public-subnet1.id}", "${aws_subnet.public-subnet2.id}"]
217 | security_groups = ["${aws_security_group.elbsg.id}"]
218 | cross_zone_load_balancing = true
219 | idle_timeout = 400
220 | connection_draining = true
221 | connection_draining_timeout = 400
222 |
223 | listener {
224 | instance_port = 80
225 | instance_protocol = "tcp"
226 | lb_port = 80
227 | lb_protocol = "tcp"
228 | }
229 |
230 | health_check {
231 | healthy_threshold = 2
232 | unhealthy_threshold = 2
233 | timeout = 3
234 | target = "HTTP:80/"
235 | interval = 30
236 | }
237 | }
238 |
--------------------------------------------------------------------------------
/wordpress/outputs.tf:
--------------------------------------------------------------------------------
1 | output "ELB_DNS" {
2 | value = "${aws_elb.default.dns_name}"
3 | }
4 |
5 | output "Blog_DNS" {
6 | value = ["${aws_instance.wb1.public_dns}", "${aws_instance.wb2.public_dns}"]
7 | }
8 | 
9 | output "MYSQL_DNS" {
10 | value = "${aws_db_instance.default.address}"
11 | }
12 |
--------------------------------------------------------------------------------
/wordpress/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "VPC Region"
3 | default = "us-east-1"
4 | }
5 |
6 | variable "vpc_cidr_block" {
7 | description = "VPC CIDR"
8 | default = "10.0.0.0/16"
9 | }
10 |
11 | variable "public_subnet1_cidr_block" {
12 | description = "Public Subnet 1 CIDR"
13 | default = "10.0.1.0/24"
14 | }
15 |
16 | variable "public_subnet2_cidr_block" {
17 | description = "Public Subnet 2 CIDR"
18 | default = "10.0.2.0/24"
19 | }
20 |
21 | variable "private_subnet1_cidr_block" {
22 | description = "Private Subnet 1 CIDR"
23 | default = "10.0.3.0/24"
24 | }
25 |
26 | variable "private_subnet2_cidr_block" {
27 | description = "Private Subnet 2 CIDR"
28 | default = "10.0.4.0/24"
29 | }
30 |
31 | variable "public_subnet1_az" {
32 | description = "Public Subnet 1 Availability Zone"
33 | default = "us-east-1a"
34 | }
35 |
36 | variable "public_subnet2_az" {
37 | description = "Public Subnet 2 Availability Zone"
38 | default = "us-east-1b"
39 | }
40 |
41 |
42 | variable "private_subnet1_az" {
43 | description = "Private Subnet 1 Availability Zone"
44 | default = "us-east-1c"
45 | }
46 |
47 | variable "private_subnet2_az" {
48 | description = "Private Subnet 2 Availability Zone"
49 | default = "us-east-1d"
50 | }
51 |
52 | variable "key_path" {
53 | description = "Public Key path"
54 | }
55 |
56 | variable "ami" {
57 | description = "Amazon Linux Image"
58 | default = "ami-4fffc834"
59 | }
60 |
61 | variable "instance_type" {
62 | description = "Server Instance Type"
63 | default = "t2.micro"
64 | }
65 |
66 | variable "engine" {
67 | description = "RDS Engine"
68 | default = "mysql"
69 | }
70 |
71 | variable "db_name" {
72 | description = "Database Name"
73 | default = "mydb"
74 | }
75 |
76 | variable "db_username" {
77 | description = "Database Username"
78 | }
79 |
80 | variable "db_password" {
81 | description = "Database Password"
82 | }
83 |
--------------------------------------------------------------------------------